Merge remote-tracking branch 'origin/jetty-9.4.x' into issue-1640
This commit is contained in:
commit
16498333a4
|
@ -15,13 +15,6 @@
|
|||
<bundle-symbolic-name>${project.groupId}.embedded</bundle-symbolic-name>
|
||||
</properties>
|
||||
<dependencies>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
<version>18.0</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.eclipse.jetty</groupId>
|
||||
<artifactId>jetty-util-ajax</artifactId>
|
||||
|
|
|
@ -122,7 +122,6 @@ public class Http2Server
|
|||
ALPN.debug=false;
|
||||
|
||||
server.start();
|
||||
//server.dumpStdErr();
|
||||
server.join();
|
||||
}
|
||||
|
||||
|
|
|
@ -20,6 +20,8 @@ package org.eclipse.jetty.embedded;
|
|||
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.security.Provider;
|
||||
import java.security.Security;
|
||||
|
||||
import org.eclipse.jetty.http.HttpVersion;
|
||||
import org.eclipse.jetty.server.Connector;
|
||||
|
@ -37,7 +39,7 @@ import org.eclipse.jetty.util.ssl.SslContextFactory;
|
|||
public class ManyConnectors
|
||||
{
|
||||
public static void main( String[] args ) throws Exception
|
||||
{
|
||||
{
|
||||
// Since this example shows off SSL configuration, we need a keystore
|
||||
// with the appropriate key. These lookup of jetty.home is purely a hack
|
||||
// to get access to a keystore that we use in many unit tests and should
|
||||
|
@ -84,6 +86,9 @@ public class ManyConnectors
|
|||
// to know about. Much more configuration is available the ssl context,
|
||||
// including things like choosing the particular certificate out of a
|
||||
// keystore to be used.
|
||||
|
||||
Security.addProvider((Provider)ClassLoader.getSystemClassLoader().loadClass("org.conscrypt.OpenSSLProvider").newInstance());
|
||||
|
||||
SslContextFactory sslContextFactory = new SslContextFactory();
|
||||
sslContextFactory.setKeyStorePath(keystoreFile.getAbsolutePath());
|
||||
sslContextFactory.setKeyStorePassword("OBF:1vny1zlo1x8e1vnw1vn61x8g1zlu1vn4");
|
||||
|
@ -126,6 +131,7 @@ public class ManyConnectors
|
|||
|
||||
// Start the server
|
||||
server.start();
|
||||
|
||||
server.join();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -68,10 +68,30 @@ public class AnnotationParser
|
|||
{
|
||||
private static final Logger LOG = Log.getLogger(AnnotationParser.class);
|
||||
|
||||
private static final int JVM_MAJOR_VER;
|
||||
|
||||
protected Set<String> _parsedClassNames = ConcurrentHashMap.newKeySet();
|
||||
|
||||
protected static int ASM_OPCODE_VERSION = Opcodes.ASM5; //compatibility of api
|
||||
|
||||
|
||||
static
|
||||
{
|
||||
// Determine JVM spec version
|
||||
// Using guidance from http://openjdk.java.net/jeps/223
|
||||
String jvmSpecVer = System.getProperty("java.vm.specification.version");
|
||||
|
||||
if (jvmSpecVer.indexOf('.') >= 0)
|
||||
{
|
||||
// Old spec version (Java 1.8 and older)
|
||||
String parts[] = jvmSpecVer.split("\\.");
|
||||
JVM_MAJOR_VER = Integer.parseInt(parts[1]);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Newer spec version (Java 9+)
|
||||
JVM_MAJOR_VER = Integer.parseInt(jvmSpecVer);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert internal name to simple name
|
||||
|
@ -1017,10 +1037,61 @@ public class AnnotationParser
|
|||
if (path == null || path.length()==0)
|
||||
return false;
|
||||
|
||||
//skip any classfiles that are in a hidden directory
|
||||
if (path.startsWith("META-INF/versions/"))
|
||||
{
|
||||
// Handle JEP 238 - Multi-Release Jars
|
||||
if (JVM_MAJOR_VER < 9)
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
{
|
||||
LOG.debug("JEP-238 Multi-Release JAR not supported on Java " +
|
||||
System.getProperty("java.version") + ": " + path);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Safety check for ASM bytecode support level.
|
||||
// When ASM 6.0 is integrated, the below will start to work.
|
||||
if (ASM_OPCODE_VERSION <= Opcodes.ASM5)
|
||||
{
|
||||
// Cannot scan Java 9 classes with ASM version 5
|
||||
if (LOG.isDebugEnabled())
|
||||
{
|
||||
LOG.debug("Unable to scan newer Java bytecode (Java 9?) with ASM 5 (skipping): " + path);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int idxStart = "META-INF/versions/".length();
|
||||
int idxEnd = path.indexOf('/', idxStart + 1);
|
||||
try
|
||||
{
|
||||
int pathVersion = Integer.parseInt(path.substring(idxStart, idxEnd));
|
||||
if (pathVersion < JVM_MAJOR_VER)
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
{
|
||||
LOG.debug("JEP-238 Multi-Release JAR version " + pathVersion +
|
||||
" not supported on Java " + System.getProperty("java.version") +
|
||||
": " + path);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
catch (NumberFormatException e)
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
{
|
||||
LOG.debug("Not a valid JEP-238 Multi-Release path: " + path);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// skip any classfiles that are in a hidden directory
|
||||
if (path.startsWith(".") || path.contains("/."))
|
||||
{
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Contains hidden dirs: {}"+path);
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Contains hidden dirs: " + path);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -18,11 +18,15 @@
|
|||
|
||||
package org.eclipse.jetty.annotations;
|
||||
|
||||
|
||||
import javax.annotation.security.DeclareRoles;
|
||||
import javax.servlet.Servlet;
|
||||
|
||||
import org.eclipse.jetty.annotations.AnnotationIntrospector.AbstractIntrospectableAnnotationHandler;
|
||||
import org.eclipse.jetty.security.ConstraintAware;
|
||||
import org.eclipse.jetty.security.ConstraintSecurityHandler;
|
||||
import org.eclipse.jetty.util.log.Log;
|
||||
import org.eclipse.jetty.util.log.Logger;
|
||||
import org.eclipse.jetty.webapp.WebAppContext;
|
||||
|
||||
/**
|
||||
|
@ -30,6 +34,7 @@ import org.eclipse.jetty.webapp.WebAppContext;
|
|||
*/
|
||||
public class DeclareRolesAnnotationHandler extends AbstractIntrospectableAnnotationHandler
|
||||
{
|
||||
private static final Logger LOG = Log.getLogger(DeclareRolesAnnotationHandler.class);
|
||||
|
||||
protected WebAppContext _context;
|
||||
|
||||
|
@ -48,6 +53,12 @@ public class DeclareRolesAnnotationHandler extends AbstractIntrospectableAnnotat
|
|||
if (!Servlet.class.isAssignableFrom(clazz))
|
||||
return; //only applicable on javax.servlet.Servlet derivatives
|
||||
|
||||
if (!(_context.getSecurityHandler() instanceof ConstraintAware))
|
||||
{
|
||||
LOG.warn("SecurityHandler not ConstraintAware, skipping security annotation processing");
|
||||
return;
|
||||
}
|
||||
|
||||
DeclareRoles declareRoles = (DeclareRoles) clazz.getAnnotation(DeclareRoles.class);
|
||||
if (declareRoles == null)
|
||||
return;
|
||||
|
|
|
@ -163,6 +163,26 @@ public class TestAnnotationParser
|
|||
// only the valid classes inside bad-classes.jar should be parsed. If any invalid classes are parsed and exception would be thrown here
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testModuleInfoClassInJar() throws Exception
|
||||
{
|
||||
File badClassesJar = MavenTestingUtils.getTestResourceFile("jdk9/slf4j-api-1.8.0-alpha2.jar");
|
||||
AnnotationParser parser = new AnnotationParser();
|
||||
Set<Handler> emptySet = Collections.emptySet();
|
||||
parser.parse(emptySet, badClassesJar.toURI());
|
||||
// Should throw no exceptions, and happily skip the module-info.class files
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testJep238MultiReleaseInJar() throws Exception
|
||||
{
|
||||
File badClassesJar = MavenTestingUtils.getTestResourceFile("jdk9/log4j-api-2.9.0.jar");
|
||||
AnnotationParser parser = new AnnotationParser();
|
||||
Set<Handler> emptySet = Collections.emptySet();
|
||||
parser.parse(emptySet, badClassesJar.toURI());
|
||||
// Should throw no exceptions, and skip the META-INF/versions/9/* files
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBasedirExclusion() throws Exception
|
||||
{
|
||||
|
|
Binary file not shown.
Binary file not shown.
|
@ -29,9 +29,7 @@ import javax.servlet.ServletException;
|
|||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
import org.eclipse.jetty.client.api.Response;
|
||||
import org.eclipse.jetty.client.api.Result;
|
||||
import org.eclipse.jetty.http.HttpField;
|
||||
import org.eclipse.jetty.client.util.DeferredContentProvider;
|
||||
import org.eclipse.jetty.server.Request;
|
||||
import org.eclipse.jetty.server.handler.AbstractHandler;
|
||||
import org.eclipse.jetty.util.ssl.SslContextFactory;
|
||||
|
@ -53,22 +51,11 @@ public class HttpResponseAbortTest extends AbstractHttpClientServerTest
|
|||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
.onResponseBegin(new Response.BeginListener()
|
||||
.onResponseBegin(response -> response.abort(new Exception()))
|
||||
.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onBegin(Response response)
|
||||
{
|
||||
response.abort(new Exception());
|
||||
}
|
||||
})
|
||||
.send(new Response.CompleteListener()
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
}
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
});
|
||||
Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
|
||||
}
|
||||
|
@ -81,23 +68,15 @@ public class HttpResponseAbortTest extends AbstractHttpClientServerTest
|
|||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
.onResponseHeader(new Response.HeaderListener()
|
||||
.onResponseHeader((response, field) ->
|
||||
{
|
||||
@Override
|
||||
public boolean onHeader(Response response, HttpField field)
|
||||
{
|
||||
response.abort(new Exception());
|
||||
return true;
|
||||
}
|
||||
response.abort(new Exception());
|
||||
return true;
|
||||
})
|
||||
.send(new Response.CompleteListener()
|
||||
.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
}
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
});
|
||||
Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
|
||||
}
|
||||
|
@ -110,23 +89,11 @@ public class HttpResponseAbortTest extends AbstractHttpClientServerTest
|
|||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
.onResponseHeaders(new Response.HeadersListener()
|
||||
.onResponseHeaders(response -> response.abort(new Exception()))
|
||||
.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onHeaders(Response response)
|
||||
{
|
||||
response.abort(new Exception());
|
||||
}
|
||||
})
|
||||
.send(new Response.CompleteListener()
|
||||
{
|
||||
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
}
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
});
|
||||
Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
|
||||
}
|
||||
|
@ -158,22 +125,11 @@ public class HttpResponseAbortTest extends AbstractHttpClientServerTest
|
|||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
.onResponseContent(new Response.ContentListener()
|
||||
.onResponseContent((response, content) -> response.abort(new Exception()))
|
||||
.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onContent(Response response, ByteBuffer content)
|
||||
{
|
||||
response.abort(new Exception());
|
||||
}
|
||||
})
|
||||
.send(new Response.CompleteListener()
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
}
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
});
|
||||
Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
|
||||
}
|
||||
|
@ -202,53 +158,31 @@ public class HttpResponseAbortTest extends AbstractHttpClientServerTest
|
|||
}
|
||||
});
|
||||
|
||||
final CountDownLatch abortLatch = new CountDownLatch(1);
|
||||
final DeferredContentProvider contentProvider = new DeferredContentProvider(ByteBuffer.allocate(1));
|
||||
final AtomicInteger completes = new AtomicInteger();
|
||||
final CountDownLatch completeLatch = new CountDownLatch(1);
|
||||
client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
.onRequestSuccess(new org.eclipse.jetty.client.api.Request.SuccessListener()
|
||||
.content(contentProvider)
|
||||
.onResponseContent((response, content) ->
|
||||
{
|
||||
@Override
|
||||
public void onSuccess(org.eclipse.jetty.client.api.Request request)
|
||||
try
|
||||
{
|
||||
try
|
||||
{
|
||||
abortLatch.await(5, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (InterruptedException x)
|
||||
{
|
||||
x.printStackTrace();
|
||||
}
|
||||
response.abort(new Exception());
|
||||
contentProvider.close();
|
||||
// Delay to let the request side to finish its processing.
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
catch (InterruptedException x)
|
||||
{
|
||||
x.printStackTrace();
|
||||
}
|
||||
})
|
||||
.onResponseContent(new Response.ContentListener()
|
||||
.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onContent(Response response, ByteBuffer content)
|
||||
{
|
||||
try
|
||||
{
|
||||
response.abort(new Exception());
|
||||
abortLatch.countDown();
|
||||
// Delay to let the request side to finish its processing.
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
catch (InterruptedException x)
|
||||
{
|
||||
x.printStackTrace();
|
||||
}
|
||||
}
|
||||
})
|
||||
.send(new Response.CompleteListener()
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
completes.incrementAndGet();
|
||||
Assert.assertTrue(result.isFailed());
|
||||
completeLatch.countDown();
|
||||
}
|
||||
completes.incrementAndGet();
|
||||
Assert.assertTrue(result.isFailed());
|
||||
completeLatch.countDown();
|
||||
});
|
||||
Assert.assertTrue(completeLatch.await(5, TimeUnit.SECONDS));
|
||||
|
||||
|
|
|
@ -25,6 +25,8 @@ Jetty also offers more niche session managers that leverage backends such as Mon
|
|||
|
||||
include::session-hierarchy.adoc[]
|
||||
include::sessions-details.adoc[]
|
||||
include::session-configuration-housekeeper.adoc[]
|
||||
include::session-configuration-sessioncache.adoc[]
|
||||
include::session-configuration-memory.adoc[]
|
||||
include::session-configuration-file-system.adoc[]
|
||||
include::session-configuration-jdbc.adoc[]
|
||||
|
@ -32,6 +34,8 @@ include::session-configuration-mongodb.adoc[]
|
|||
include::session-configuration-infinispan.adoc[]
|
||||
include::session-configuration-hazelcast.adoc[]
|
||||
include::session-configuration-gcloud.adoc[]
|
||||
include::session-configuration-memcachedsessiondatastore.adoc[]
|
||||
include::sessions-usecases.adoc[]
|
||||
//include::setting-session-characteristics.adoc[]
|
||||
//include::using-persistent-sessions.adoc[]
|
||||
//include::session-clustering-jdbc.adoc[]
|
||||
|
|
|
@ -16,7 +16,9 @@
|
|||
|
||||
[[configuring-sessions-file-system]]
|
||||
|
||||
=== Non-Clustered Session Management: File System
|
||||
=== Persistent Sessions: File System
|
||||
|
||||
Note: Persisting sessions to the local file system should *not* be used in a clustered environment.
|
||||
|
||||
==== Enabling File System Sessions
|
||||
|
||||
|
|
|
@ -15,7 +15,8 @@
|
|||
// ========================================================================
|
||||
|
||||
[[configuring-sessions-gcloud]]
|
||||
=== Clustered Session Management: Google Cloud DataStore
|
||||
|
||||
=== Persistent Sessions: Google Cloud DataStore
|
||||
|
||||
==== Preparation
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
[[configuring-sessions-hazelcast]]
|
||||
|
||||
=== Clustered Session Management: Hazelcast
|
||||
=== Persistent Sessions: Hazelcast
|
||||
|
||||
==== Enabling Hazelcast Sessions
|
||||
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
// ========================================================================
|
||||
// Copyright (c) 1995-2017 Mort Bay Consulting Pty. Ltd.
|
||||
// ========================================================================
|
||||
// All rights reserved. This program and the accompanying materials
|
||||
// are made available under the terms of the Eclipse Public License v1.0
|
||||
// and Apache License v2.0 which accompanies this distribution.
|
||||
//
|
||||
// The Eclipse Public License is available at
|
||||
// http://www.eclipse.org/legal/epl-v10.html
|
||||
//
|
||||
// The Apache License v2.0 is available at
|
||||
// http://www.opensource.org/licenses/apache2.0.php
|
||||
//
|
||||
// You may elect to redistribute this code under either of these licenses.
|
||||
// ========================================================================
|
||||
|
||||
[[session-configuration-housekeeper]]
|
||||
=== The SessionIdManager and the Housekeeper
|
||||
|
||||
==== Default Settings
|
||||
By default, Jetty will instantiate a single instance of the `DefaultSessionIdManager` and `HouseKeeper` at startup with default settings.
|
||||
|
||||
The default settings are:
|
||||
|
||||
DefaultSessionIdManager: worker name::
|
||||
This uniquely identifies the jetty server instance within a cluster.
|
||||
It is set from the value of the `JETTY_WORKER_INSTANCE` environment variable, or `node0` if the environment value is not set.
|
||||
If you have more than one Jetty instance, it is *crucial* that you explicitly configure the worker name on each Jetty instance (see link:#session-idmanager-housekeeper-config[below] for how to configure).
|
||||
|
||||
HouseKeeper: scavenge interval::
|
||||
This is the period in seconds between runs of the session scavenger, and by default is set to the equivalent of 10 minutes.
|
||||
As a rule of thumb, you should ensure that the scavenge interval is shorter than the `maxInactiveInterval` of your sessions to ensure that they are promptly scavenged.
|
||||
See below for instructions on how to configure this.
|
||||
|
||||
[[session-idmanager-housekeeper-config]]
|
||||
==== Configuration
|
||||
To change the default values, use the link:#startup-modules[module system] to link:#startup-modules[enable] the `sessions` module.
|
||||
|
||||
This will enable the `$jetty.home/etc/sessions/id-manager.xml` file and generate a `$jetty.base/start.d/sessions.ini` file.
|
||||
|
||||
The `id-manager.xml` file instantiates a single `DefaultSessionIdManager` and `HouseKeeper` and configures them using the properties from the `sessions.ini` file.
|
||||
|
||||
Edit the ini file to change the properties to easily customize the `DefaultSessionIdManager` and `HouseKeeper`:
|
||||
|
||||
jetty.sessionIdManager.workerName::
|
||||
By default it is `node1`.
|
||||
This uniquely identifies the Jetty server instance within a cluster.
|
||||
If you have more than one Jetty instance, it is crucial that you configure the worker name differently on each jetty instance.
|
||||
|
||||
|
||||
jetty.sessionScavengeInterval.seconds::
|
||||
This is the period in seconds between runs of the session scavenger.
|
||||
By default it will run every 600 secs (ie 10 mins).
|
||||
As a rule of thumb, you should ensure that the scavenge interval is shorter than the maxInactiveInterval of your sessions to ensure that they are promptly scavenged.
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
[[configuring-sessions-infinispan]]
|
||||
|
||||
=== Clustered Session Management: Inifinspan
|
||||
=== Persistent Sessions: Inifinspan
|
||||
|
||||
==== Enabling Infinispan Sessions
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
[[configuring-sessions-jdbc]]
|
||||
|
||||
=== Clustered Session Management: JDBC
|
||||
=== Persistent Sessions: JDBC
|
||||
|
||||
==== Enabling JDBC Sessions
|
||||
|
||||
|
|
|
@ -0,0 +1,62 @@
|
|||
// ========================================================================
|
||||
// Copyright (c) 1995-2017 Mort Bay Consulting Pty. Ltd.
|
||||
// ========================================================================
|
||||
// All rights reserved. This program and the accompanying materials
|
||||
// are made available under the terms of the Eclipse Public License v1.0
|
||||
// and Apache License v2.0 which accompanies this distribution.
|
||||
//
|
||||
// The Eclipse Public License is available at
|
||||
// http://www.eclipse.org/legal/epl-v10.html
|
||||
//
|
||||
// The Apache License v2.0 is available at
|
||||
// http://www.opensource.org/licenses/apache2.0.php
|
||||
//
|
||||
// You may elect to redistribute this code under either of these licenses.
|
||||
// ========================================================================
|
||||
|
||||
[[session-configuration-memcachedsessiondatastore]]
|
||||
|
||||
=== Persistent Sessions: The L2 Session Data Cache
|
||||
|
||||
If your chosen persistence technology is slow, it can be helpful to locally cache the session data.
|
||||
The `CachingSessionDataStore` is a special type of `SessionDataStore` that locally caches session data, which makes reads faster. It writes-through to your chosen type of `SessionDataStore` when session data changes.
|
||||
|
||||
==== MemcachedSessionDataMap
|
||||
|
||||
The `MemcachedSessionDataMap` uses `memcached` to perform caching.
|
||||
|
||||
To enable it with the Jetty distribution, enable the `session-store-cache` link:#startup-modules[module], along with your chosen `session-store-xxxx` module, and optionally the `session-cache-hash` or `session-cache-null` modules.
|
||||
|
||||
After enabling, the `$jetty.base/start.d/session-store-cache.ini` file will be generated:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
----
|
||||
--module=session-store-cache
|
||||
|
||||
|
||||
## Session Data Cache type: xmemcached
|
||||
session-data-cache=xmemcached
|
||||
#jetty.session.memcached.host=localhost
|
||||
#jetty.session.memcached.port=11211
|
||||
#jetty.session.memcached.expirySec=
|
||||
#jetty.session.memcached.heartbeats=true
|
||||
----
|
||||
|
||||
|
||||
The configuration properties are:
|
||||
|
||||
jetty.session.memcached.host::
|
||||
Default value is `localhost`.
|
||||
This is the host on which the memcached server resides.
|
||||
|
||||
jetty.session.memcached.port::
|
||||
Default value is `11211`.
|
||||
This is the port on which the memcached server is listening.
|
||||
|
||||
jetty.session.memcached.expirySec::
|
||||
Default value `0`.
|
||||
This is the length of time in seconds that an item can remain in the memcached cache, where 0 indicates indefinitely.
|
||||
|
||||
jetty.session.memcached.heartbeats::
|
||||
Default value `true`.
|
||||
Whether or not the memcached system should generate heartbeats.
|
|
@ -16,21 +16,16 @@
|
|||
|
||||
[[configuring-sessions-memory]]
|
||||
|
||||
=== Non-Clustered Session Management: Memory
|
||||
=== Non-Persistent Sessions
|
||||
|
||||
Non-clustered, in-memory-only is the default style of Session Management.
|
||||
In previous versions of Jetty this was referred to as "hash" sessions, as they were stored in a `HashMap` in memory.
|
||||
When using the Jetty distribution, if you do not configure any session module, this will be enabled by default.
|
||||
Non-clustered, non-persistent, in-memory-only is the default style of session management.
|
||||
In previous versions of Jetty this was referred to as "hash" sessions, as they were stored in a `HashMap` in memory.
|
||||
|
||||
Specifically, Jetty will hook up:
|
||||
This is delivered by a combination of the `DefaultSessionCache` (to keep sessions in memory) and a `NullSessionDataStore` (to avoid session persistence).
|
||||
|
||||
A `DefaultSessionIdManager`::
|
||||
* Produces unique session ids and supports cross-context dispatch re-use of session ids
|
||||
A `HouseKeeper`::
|
||||
* Scavenges for expired sessions every 10 mins
|
||||
A `DefaultSessionCache` per context::
|
||||
* Keeps session objects in memory
|
||||
A `NullSessionDataStore` per context::
|
||||
* No persistence of sessions
|
||||
If you do nothing, Jetty will instantiate one of each of these objects for each context at startup time using hard-coded defaults.
|
||||
|
||||
If you wish to change any of the default configuration, enable the `session-cache-hash` module.
|
||||
To explicitly set up non-persisted sessions using modules, use both the `session-cache-hash` and the `session-store-null` modules.
|
||||
|
||||
Enabling the modules allows you to configure behavior - see link:#session-configuration-sessioncache[the L1 Session Cache] for detailed information on configuration options for the `DefaultSessionCache`.
|
||||
The `NullSessionDataStore` has no customizable options.
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
[[configuring-sessions-mongo]]
|
||||
|
||||
=== Clustered Session Management: MongoDB
|
||||
=== Persistent Sessions: MongoDB
|
||||
|
||||
==== Enabling MongoDB Sessions
|
||||
|
||||
|
|
|
@ -0,0 +1,73 @@
|
|||
// ========================================================================
|
||||
// Copyright (c) 1995-2017 Mort Bay Consulting Pty. Ltd.
|
||||
// ========================================================================
|
||||
// All rights reserved. This program and the accompanying materials
|
||||
// are made available under the terms of the Eclipse Public License v1.0
|
||||
// and Apache License v2.0 which accompanies this distribution.
|
||||
//
|
||||
// The Eclipse Public License is available at
|
||||
// http://www.eclipse.org/legal/epl-v10.html
|
||||
//
|
||||
// The Apache License v2.0 is available at
|
||||
// http://www.opensource.org/licenses/apache2.0.php
|
||||
//
|
||||
// You may elect to redistribute this code under either of these licenses.
|
||||
// ========================================================================
|
||||
|
||||
[[session-configuration-sessioncache]]
|
||||
=== The L1 Session Cache
|
||||
|
||||
==== The DefaultSessionCache
|
||||
|
||||
In the absence of any explicit configuration, Jetty will instantiate an instance of the `DefaultSessionCache` per context.
|
||||
If you wish to change any of the default values, you need to enable the `session-cache-hash` link:#startup-modules[module].
|
||||
|
||||
Once the `session-cache-hash` module has been enabled, you can view a list of all the configurable values by opening `start.d/session-cache-hash.ini`:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
----
|
||||
--module=session-cache-hash
|
||||
|
||||
#jetty.session.evictionPolicy=-1
|
||||
#jetty.session.saveOnInactiveEvict=false
|
||||
#jetty.session.saveOnCreate=false
|
||||
#jetty.session.removeUnloadableSessions=false
|
||||
----
|
||||
|
||||
jetty.session.evictionPolicy::
|
||||
Integer.
|
||||
Controls whether session objects that are held in memory are subject to eviction from the memory cache.
|
||||
Evicting sessions can reduce the memory footprint of the cache.
|
||||
Eviction is usually used in conjunction with a `SessionDataStore` that persists sessions.
|
||||
Values are:
|
||||
* -1 : sessions are never evicted from the cache
|
||||
* 0 : sessions are evicted from the cache as soon as the last active request for it finishes
|
||||
* >= 1 : any positive number is the time in seconds after which a session that is in the cache but has not experienced any activity will be evicted
|
||||
|
||||
____
|
||||
[NOTE]
|
||||
If you are not using a `SessionDataStore` that persists sessions, be aware that evicted sessions will be lost.
|
||||
____
|
||||
|
||||
jetty.session.saveOnInactiveEvict::
|
||||
Boolean, default `false`.
|
||||
Controls whether a session will be saved to the `SessionDataStore` just prior to its eviction.
|
||||
|
||||
jetty.session.saveOnCreate::
|
||||
Boolean, default `false`.
|
||||
Controls whether a session that is newly created will be immediately saved to the `SessionDataStore` or lazily saved as the last request for the session exits.
|
||||
|
||||
jetty.session.removeUnloadableSessions::
|
||||
Boolean, default `false`.
|
||||
Controls whether a session that cannot be restored - for example because it is corrupted - from the `SessionDataStore` is deleted by the `SessionDataStore`.
|
||||
|
||||
For more general information on the uses of these configuration properties, see link:#sessions-details[Session Components].
|
||||
|
||||
|
||||
==== The NullSessionCache
|
||||
|
||||
The `NullSessionCache` is a trivial implementation of the `SessionCache` that does not cache any session information.
|
||||
You may need to use it if your clustering setup does not have a sticky load balancer, or if you want absolutely minimal support for sessions.
|
||||
If you use this in conjunction with the `NullSessionDataStore`, then sessions will neither be retained in memory nor persisted.
|
||||
|
||||
To enable the `NullSessionCache`, enable the `sesssion-cache-null` link:#startup-modules[module].
|
|
@ -49,8 +49,11 @@ image::images/SessionsHierarchy.png[]
|
|||
|
||||
==== Configuring Sessions in the Jetty Distribution
|
||||
|
||||
Jetty provides support for several different Session Management technologies.
|
||||
Both link:#configuring-sessions-file-system[local file storage] and in-memory session management can be implemented for standard implementations.
|
||||
For implementations using clustered technologies, link:#configuring-sessions-jdbc[JDBC], link:#configuring-sessions-mongo[MongoDB], link:#configuring-sessions-infinispan[Inifinispan] and link:#configuring-sessions-gcloud[Google Cloud Datastore] are all supported.
|
||||
Setting up these technologies is as easy as enabling it's link:#startup-modules[module] and editing it's associated ini file with any usernames, passwords or changes you need to make for your instance.
|
||||
The following sections will cover how exactly to enable the required modules as well as an overview of what options are available for customization.
|
||||
Configuring session management involves selecting a link:#startup-modules[module] for the desired type of link:#session-configuration-sessioncache[session caching] behavior, and a module for the type of session persistence.
|
||||
|
||||
Jetty provides two different session caches: the `DefaultSessionCache` which holds sessions in memory, and the `NullSessionCache` which does not.
|
||||
There is more information on both of these types of session caching and the circumstances which would lead you to select one or the other in the link:#sessions-details[Session Components] section, and more information on the configuration options of each in link:#session-configuration-sessioncache[the L1 Session Cache] section.
|
||||
|
||||
For session persistence, Jetty provides a number of different implementations from which to choose including link:#configuring-sessions-memory[non-persistence], link:#configuring-sessions-file-system[local file storage], clustered technologies such as link:#configuring-sessions-jdbc[JDBC], link:#configuring-sessions-mongo[MongoDB], link:#configuring-sessions-infinispan[Inifinispan], link:#configuring-sessions-gcloud[Google Cloud Datastore], and link:#configuring-sessions-hazelcast[Hazelcast].
|
||||
|
||||
Depending on your persistence technology, to enhance performance, you may want to use an L2 cache for session data, in which case Jetty provides the link:#session-configuration-memcachedsessiondatastore[memcached L2 session data cache].
|
||||
|
|
|
@ -15,40 +15,43 @@
|
|||
// ========================================================================
|
||||
|
||||
[[sessions-details]]
|
||||
=== Session Configuration and Use Cases
|
||||
=== Session Components
|
||||
|
||||
==== Configuration
|
||||
==== SessionIdManager
|
||||
|
||||
===== SessionIdManager
|
||||
|
||||
There is a maximum of 1 `SessionIdManager` per jetty Server instance.
|
||||
There is a maximum of one (1) `SessionIdManager` per Jetty Server instance.
|
||||
Its purpose is to generate fresh, unique session ids and to coordinate the re-use of session ids amongst co-operating contexts.
|
||||
|
||||
Unlike in previous versions of Jetty, the `SessionIdManager` is agnostic with respect to the type of clustering technology chosen.
|
||||
|
||||
Jetty provides a default implementation - the `DefaultSessionIdManager` - which should meet the needs of most users.
|
||||
If you do not explicitly enable one of the session modules, or otherwise configure a `SessionIdManager`, the `DefaultSessionIdManager` will be used.
|
||||
If you do not explicitly enable one of the session modules or otherwise configure a `SessionIdManager`, the `DefaultSessionIdManager` will be used.
|
||||
|
||||
If the `DefaultSessionIdManager` does not meet your needs, you can extend the `org.eclipse.jetty.server.session.AbstractSessionIdManager` or do a fresh implementation of the `org.eclipse.jetty.server.session.SessionIdManager` interface.
|
||||
|
||||
===== HouseKeeper
|
||||
See link:#session-configuration-housekeeper[Configuring the SessionIdManager and HouseKeeper] for details on configuration.
|
||||
|
||||
There is a maximum of 1 `HouseKeeper` per `SessionIdManager`.
|
||||
==== HouseKeeper
|
||||
|
||||
There is a maximum of one (1) `HouseKeeper` per `SessionIdManager`.
|
||||
Its purpose is to periodically poll the `SessionHandlers` to clean out expired sessions.
|
||||
|
||||
By default the `HouseKeeper` will poll the `SessionHandlers` every 10 mins to find and delete expired sessions, although this interval is configurable.
|
||||
|
||||
See link:#session-configuration-housekeeper[Configuring the SessionIdManager and HouseKeeper] for details on configuration.
|
||||
|
||||
===== SessionCache
|
||||
|
||||
There is 1 `SessionCache` per context.
|
||||
==== SessionCache
|
||||
|
||||
There is one (1) `SessionCache` *per context.*
|
||||
Its purpose is to provide an L1 cache of Session objects.
|
||||
Having a working set of Session objects in memory allows multiple simultaneous requests for the same session to share the same Session object.
|
||||
|
||||
Jetty provides 2 `SessionCache` implementations: the `DefaultSessionCache` and the `NullSessionCache`.
|
||||
The `DefaultSessionCache` retains Session objects in memory in a cache and has a number of configuration options to control cache behavior.
|
||||
Jetty provides two (2) `SessionCache` implementations: the `DefaultSessionCache` and the `NullSessionCache`.
|
||||
The `DefaultSessionCache` retains Session objects in memory in a cache and has a number of link:#session-configuration-sessioncache[configuration options] to control cache behavior.
|
||||
It is the default that is used if no other `SessionCache` has been configured.
|
||||
It is suitable for non-clustered and clustered deployments with a sticky load balancer, as well as clustered deployments with a non-sticky load balancer, with some caveats.
|
||||
|
||||
The `NullSessionCache` does not actually cache any objects: each request uses a fresh Session object.
|
||||
It is suitable for clustered deployments without a sticky load balancer and non-clustered deployments when purely minimal support for sessions is needed.
|
||||
|
||||
|
@ -58,16 +61,19 @@ They can also be configured to do an immediate, eager write of a freshly created
|
|||
This can be useful if you are likely to experience multiple, near simultaneous requests referencing the same session, e.g. with HTTP/2 and you don't have a sticky load balancer.
|
||||
Alternatively, if the eager write is not done, application paths which create and then invalidate a session within a single request never incur the cost of writing to persistent storage.
|
||||
|
||||
Additionally, if the `EVICT_ON_INACTIVITY` eviction policy is in use, you can configure the `DefaultSessionCache` to force a write of the Session to the SessionDataStore just before the Session is evicted.
|
||||
Additionally, if the `EVICT_ON_INACTIVITY` eviction policy is in use, you can link:#session-configuration-sessioncache[configure] the `DefaultSessionCache` to force a write of the Session to the `SessionDataStore` just before the Session is evicted.
|
||||
|
||||
===== SessionDataStore
|
||||
See link:#session-configuration-sessioncache[the L1 Session Cache] for more information.
|
||||
|
||||
There is 1 `SessionDataStore` per context. Its purpose is to handle all persistence related operations on sessions.
|
||||
==== SessionDataStore
|
||||
|
||||
There is one (1) `SessionDataStore` per context.
|
||||
Its purpose is to handle all persistence related operations on sessions.
|
||||
|
||||
The common characteristics for all `SessionDataStores` are whether or not they support passivation, and the length of the grace period.
|
||||
|
||||
Supporting passivation means that session data is serialized.
|
||||
Some persistence mechanisms serialize, such as JDBC, GCloud Datastore etc, whereas others may store an object in shared memory eg Infinispan when configured with a local cache.
|
||||
Some persistence mechanisms serialize, such as JDBC, GCloud Datastore etc, whereas others may store an object in shared memory, e.g. Infinispan, when configured with a local cache.
|
||||
|
||||
Whether or not a clustering technology entails passivation controls whether or not the session passivation/activation listeners will be called.
|
||||
|
||||
|
@ -79,56 +85,17 @@ When `SessionDataStores` search their persistent store to find sessions that hav
|
|||
* The second finds sessions in the store that have expired which were last live on the current node
|
||||
* The third finds sessions that expired a "while" ago, irrespective of on which node they were last used: the definition of "a while" is based on the grace period.
|
||||
|
||||
Jetty instantiates the trivial `NullSessionDataStore` - which does not persist sessions - as the default.
|
||||
|
||||
===== CachingSessionDataStore
|
||||
The distribution provides a number of alternative `SessionDataStore` implementations such as link:#configuring-sessions-file-system[FileSessionDataStore], link:#configuring-sessions-gcloud[GCloudSessionDataStore], link:#configuring-sessions-jdbc[JDBCSessionDataStore], link:#configuring-sessions-mongodb[MongoSessionDataStore], link:#configuring-sessions-infinispan[InfinispanSessionDataStore], link:#configuring-sessions-hazelcast[HazelcastSessionDataStore].
|
||||
|
||||
The `CachingSessionDataStore` is a special type of `SessionDataStore` that inserts an L2 cache of SessionData - the `SessionDataMap` - in front of a delegate `SessionDataStore`.
|
||||
|
||||
==== CachingSessionDataStore
|
||||
|
||||
The `CachingSessionDataStore` is a special type of `SessionDataStore` that inserts an L2 cache of Session data - the `SessionDataMap` - in front of a delegate `SessionDataStore`.
|
||||
The `SessionDataMap` is preferentially consulted before the actual SessionDataStore on reads.
|
||||
This can improve the performance of slow stores.
|
||||
|
||||
Jetty provides one implementation of the this L2 cache based on `Memcached`, the `MemcachedSessionDataMap`.
|
||||
|
||||
|
||||
==== Use Cases
|
||||
|
||||
===== Clustering with a Sticky Load Balancer
|
||||
|
||||
Preferably, your cluster will utilize a sticky load balancer.
|
||||
This will route requests for the same session to the same Jetty instance.
|
||||
In this case, the `DefaultSessionCache` can be used to keep in-use Session objects in memory.
|
||||
You can fine-tune the cache by controlling how long Session objects remain in memory with the eviction policy settings.
|
||||
|
||||
If you have a large number of Sessions or very large Session objects, then you might want to manage your memory allocation by controlling the amount of time Session objects spend in the cache.
|
||||
The `EVICT_ON_SESSION_EXIT` eviction policy will remove a Session object from the cache as soon as the last simultaneous request referencing it exits.
|
||||
Alternatively, the `EVICT_ON_INACTIVITY` policy will remove a Session object from the cache after a configurable amount of time has passed without a request referencing it.
|
||||
|
||||
If your Sessions are very long lived and infrequently referenced, you might use the `EVICT_ON_INACTIVITY_POLICY` to control the size of the cache.
|
||||
|
||||
If your Sessions are small, or relatively few or stable in number or they are read-mostly, then you might select the `NEVER_EVICT` policy.
|
||||
With this policy, Session objects will remain in the cache until they either expire or are explicitly invalidated.
|
||||
|
||||
If you have a high likelihood of simultaneous requests for the same session object, then the `EVICT_ON_SESSION_EXIT` policy will ensure the Session object stays in the cache as long as it is needed.
|
||||
|
||||
|
||||
===== Clustering without a Sticky Load Balancer
|
||||
|
||||
Without a sticky load balancer requests for the same session may arrive on any node in the cluster.
|
||||
This means it is likely that the copy of the Session object in any `SessionCache` is likely to be out-of-date, as the Session was probably last accessed on a different node.
|
||||
In this case, your `choices` are to use either the `NullSessionCache` or to de-tuned the `DefaultSessionCache`.
|
||||
If you use the NullSessionCache all Session object caching is avoided.
|
||||
This means that every time a request references a session it must be brought in from persistent storage.
|
||||
It also means that there can be no sharing of Session objects for multiple requests for the same session: each will have their own Session object.
|
||||
Furthermore, the outcome of session writes are indeterminate because the Servlet Specification does not mandate ACID transactions for sessions.
|
||||
|
||||
If you use the `DefaultSessionCache`, there is a risk that the caches on some nodes will contain out-of-date session information as simultaneous requests for the same session are scattered over the cluster.
|
||||
To mitigate this somewhat you can use the `EVICT_ON_SESSION_EXIT` eviction policy: this will ensure that the Session is removed from the cache as soon as the last simultaneous request for it exits.
|
||||
Again, due to the lack of session transactionality, the ordering outcome of write operations cannot be guaranteed.
|
||||
As the Session is cached while at least one request is accessing it, it is possible for multiple simultaneous requests to share the same Session object.
|
||||
|
||||
|
||||
===== Handling corrupted or unloadable session data
|
||||
|
||||
For various reasons it might not be possible for the SessionDataStore to re-read a stored session.
|
||||
One scenario is that the session stores a serialized object in it's attributes, and after a redeployment there in an incompatible class change.
|
||||
Using the setter `SessionCache.setRemoveUnloadableSessions(true)` will allow the `SessionDataStore` to delete the unreadable session from persistent storage.
|
||||
This can be useful from preventing the scavenger from continually generating errors on the same expired, but un-restorable, session.
|
||||
See link:#session-configuration-memcachedsessiondatastore[the L2 SessionData Cache]for additional information.
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
// ========================================================================
|
||||
// Copyright (c) 1995-2017 Mort Bay Consulting Pty. Ltd.
|
||||
// ========================================================================
|
||||
// All rights reserved. This program and the accompanying materials
|
||||
// are made available under the terms of the Eclipse Public License v1.0
|
||||
// and Apache License v2.0 which accompanies this distribution.
|
||||
//
|
||||
// The Eclipse Public License is available at
|
||||
// http://www.eclipse.org/legal/epl-v10.html
|
||||
//
|
||||
// The Apache License v2.0 is available at
|
||||
// http://www.opensource.org/licenses/apache2.0.php
|
||||
//
|
||||
// You may elect to redistribute this code under either of these licenses.
|
||||
// ========================================================================
|
||||
|
||||
[[sessions-usecases]]
|
||||
=== Use Cases
|
||||
|
||||
===== Clustering with a Sticky Load Balancer
|
||||
|
||||
Preferably, your cluster will utilize a sticky load balancer.
|
||||
This will route requests for the same Session to the same Jetty instance.
|
||||
In this case, the `DefaultSessionCache` can be used to keep in-use Session objects in memory.
|
||||
You can fine-tune the cache by controlling how long Session objects remain in memory with the eviction policy settings.
|
||||
|
||||
If you have a large number of Sessions or very large Session objects, then you may want to manage your memory allocation by controlling the amount of time Session objects spend in the cache.
|
||||
The `EVICT_ON_SESSION_EXIT` eviction policy will remove a Session object from the cache as soon as the last simultaneous request referencing it exits.
|
||||
Alternatively, the `EVICT_ON_INACTIVITY` policy will remove a Session object from the cache after a configurable amount of time has passed without a request referencing it.
|
||||
|
||||
If your Sessions are very long lived and infrequently referenced, you might use the `EVICT_ON_INACTIVITY_POLICY` to control the size of the cache.
|
||||
|
||||
If your Sessions are small, or relatively few or stable in number or they are read-mostly, then you might select the `NEVER_EVICT` policy.
|
||||
With this policy, Session objects will remain in the cache until they either expire or are explicitly invalidated.
|
||||
|
||||
If you have a high likelihood of simultaneous requests for the same session object, then the `EVICT_ON_SESSION_EXIT` policy will ensure the Session object stays in the cache as long as it is needed.
|
||||
|
||||
|
||||
===== Clustering Without a Sticky Load Balancer
|
||||
|
||||
Without a sticky load balancer requests for the same session may arrive on any node in the cluster.
|
||||
This means it is likely that the copy of the Session object in any `SessionCache` is likely to be out-of-date, as the Session was probably last accessed on a different node.
|
||||
In this case, your `choices` are to use either the `NullSessionCache` or to de-tune the `DefaultSessionCache`.
|
||||
If you use the NullSessionCache all Session object caching is avoided.
|
||||
This means that every time a request references a session it must be brought in from persistent storage.
|
||||
It also means that there can be no sharing of Session objects for multiple requests for the same session: each will have their own Session object.
|
||||
Furthermore, the outcome of session writes are indeterminate because the Servlet Specification does not mandate ACID transactions for sessions.
|
||||
|
||||
If you use the `DefaultSessionCache`, there is a risk that the caches on some nodes will contain out-of-date Session information as simultaneous requests for the same session are scattered over the cluster.
|
||||
To mitigate this somewhat you can use the `EVICT_ON_SESSION_EXIT` eviction policy: this will ensure that the Session is removed from the cache as soon as the last simultaneous request for it exits.
|
||||
Again, due to the lack of Session transactionality, the ordering outcome of write operations cannot be guaranteed.
|
||||
As the Session is cached while at least one request is accessing it, it is possible for multiple simultaneous requests to share the same Session object.
|
||||
|
||||
|
||||
===== Handling corrupted or unloadable session data
|
||||
|
||||
For various reasons it might not be possible for the `SessionDataStore` to re-read a stored session.
|
||||
One scenario is that the session stores a serialized object in it's attributes, and after a redeployment there in an incompatible class change.
|
||||
Using the setter `SessionCache.setRemoveUnloadableSessions(true)` will allow the `SessionDataStore` to delete the unreadable session from persistent storage.
|
||||
This can be useful from preventing the scavenger from continually generating errors on the same expired, but un-restorable, session.
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase]$ java -jar /opt/jetty-distribution/start.jar --list-config
|
||||
[mybase]$ java -jar $JETTY_HOME/start.jar --list-config
|
||||
|
||||
Java Environment:
|
||||
-----------------
|
||||
|
@ -28,7 +28,7 @@ Java Environment:
|
|||
java.runtime.name = Java(TM) SE Runtime Environment
|
||||
java.runtime.version = 1.8.0_92-b14
|
||||
java.io.tmpdir = /var/folders/h6/yb_lbnnn11g0y1jjlvqg631h0000gn/T/
|
||||
user.dir = /Users/staff/installs/repository/jetty-distribution-9.4.0/mybase
|
||||
user.dir = /Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase
|
||||
user.language = en
|
||||
user.country = US
|
||||
|
||||
|
@ -36,14 +36,14 @@ Jetty Environment:
|
|||
-----------------
|
||||
jetty.version = {VERSION}
|
||||
jetty.tag.version = master
|
||||
jetty.home = /Users/staff/installs/repository/jetty-distribution-9.4.0
|
||||
jetty.base = /Users/staff/installs/repository/jetty-distribution-9.4.0/mybase
|
||||
jetty.home = /Users/staff/installs/repository/jetty-distribution-{VERSION}
|
||||
jetty.base = /Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase
|
||||
|
||||
Config Search Order:
|
||||
--------------------
|
||||
<command-line>
|
||||
${jetty.base} -> /Users/staff/installs/repository/jetty-distribution-9.4.0/mybase
|
||||
${jetty.home} -> /Users/staff/installs/repository/jetty-distribution-9.4.0
|
||||
${jetty.base} -> /Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase
|
||||
${jetty.home} -> /Users/staff/installs/repository/jetty-distribution-{VERSION}
|
||||
|
||||
|
||||
JVM Arguments:
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase]$ java -jar ../start.jar --list-config
|
||||
[mybase]$ java -jar $JETTY_HOME/start.jar --list-config
|
||||
|
||||
Java Environment:
|
||||
-----------------
|
||||
|
@ -28,7 +28,7 @@ Java Environment:
|
|||
java.runtime.name = Java(TM) SE Runtime Environment
|
||||
java.runtime.version = 1.8.0_92-b14
|
||||
java.io.tmpdir = /var/folders/h6/yb_lbnnn11g0y1jjlvqg631h0000gn/T/
|
||||
user.dir = /Users/staff/installs/repository/jetty-distribution-9.4.0/mybase
|
||||
user.dir = /Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase
|
||||
user.language = en
|
||||
user.country = US
|
||||
|
||||
|
@ -36,14 +36,14 @@ Jetty Environment:
|
|||
-----------------
|
||||
jetty.version = {VERSION}
|
||||
jetty.tag.version = master
|
||||
jetty.home = /Users/staff/installs/repository/jetty-distribution-9.4.0
|
||||
jetty.base = /Users/staff/installs/repository/jetty-distribution-9.4.0/mybase
|
||||
jetty.home = /Users/staff/installs/repository/jetty-distribution-{VERSION}
|
||||
jetty.base = /Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase
|
||||
|
||||
Config Search Order:
|
||||
--------------------
|
||||
<command-line>
|
||||
${jetty.base} -> /Users/staff/installs/repository/jetty-distribution-9.4.0/mybase
|
||||
${jetty.home} -> /Users/staff/installs/repository/jetty-distribution-9.4.0
|
||||
${jetty.base} -> /Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase
|
||||
${jetty.home} -> /Users/staff/installs/repository/jetty-distribution-{VERSION}
|
||||
|
||||
|
||||
JVM Arguments:
|
||||
|
@ -81,7 +81,8 @@ Note: order presented here is how they would appear on the classpath.
|
|||
|
||||
Jetty Active XMLs:
|
||||
------------------
|
||||
${jetty.home}/etc/jetty.xml
|
||||
${jetty.home}/etc/jetty-deploy.xml
|
||||
${jetty.home}/etc/jetty-http.xml
|
||||
${jetty.home}/etc/jetty.xml
|
||||
${jetty.home}/etc/jetty-webapp.xml
|
||||
${jetty.home}/etc/jetty-deploy.xml
|
||||
${jetty.home}/etc/jetty-http.xml
|
||||
....
|
||||
|
|
|
@ -16,11 +16,13 @@
|
|||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase]$ java -jar ../start.jar --add-to-start=http,webapp,deploy
|
||||
INFO : webapp initialised in ${jetty.base}/start.d/webapp.ini
|
||||
INFO : server initialised (transitively) in ${jetty.base}/start.d/server.ini
|
||||
INFO : http initialised in ${jetty.base}/start.d/http.ini
|
||||
INFO : deploy initialised in ${jetty.base}/start.d/deploy.ini
|
||||
MKDIR: ${jetty.base}/webapps
|
||||
INFO : Base directory was modified
|
||||
[mybase]$ java -jar $JETTY_HOME/start.jar --add-to-start=http,webapp,deploy
|
||||
INFO : webapp initialized in ${jetty.base}/start.ini
|
||||
INFO : server transitively enabled, ini template available with --add-to-start=server
|
||||
INFO : security transitively enabled
|
||||
INFO : servlet transitively enabled
|
||||
INFO : http initialized in ${jetty.base}/start.ini
|
||||
INFO : deploy initialized in ${jetty.base}/start.ini
|
||||
MKDIR : ${jetty.base}/webapps
|
||||
INFO : Base directory was modified
|
||||
....
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase]$ java -jar ../start.jar --list-modules=logging,-internal
|
||||
[mybase]$ java -jar $JETTY_HOME/start.jar --list-modules=logging,-internal
|
||||
|
||||
Available Modules:
|
||||
==================
|
||||
|
|
|
@ -18,7 +18,8 @@
|
|||
=== Managing Startup Modules
|
||||
|
||||
The standard Jetty Distribution ships with several modules defined in `${jetty.home}/modules/`.
|
||||
These modules allow flexibility for implementations and make configuration a much more plug-and-play set up.
|
||||
Modules interact with Jetty XML files to configure options and parameters for the server and are the primary configuration method for Jetty distributions.
|
||||
Modules allow flexibility for implementations and their plug-and-play nature makes adding or removing server functionality virtually painless.
|
||||
|
||||
[[enabling-modules]]
|
||||
==== Enabling Modules
|
||||
|
@ -28,27 +29,27 @@ The default distribution has a co-mingled `${jetty.home}` and `${jetty.base}` wh
|
|||
It is highly encouraged that you learn about the differences in link:#startup-base-and-home[Jetty Base vs Jetty Home] and take full advantage of this setup.
|
||||
____
|
||||
|
||||
Jetty ships with many modules defined in `${jetty.home}/modules`.
|
||||
Enabling a module is a simple process: simply add the `--add-to-start` syntax on the command line.
|
||||
Doing this will enable the module and any dependent modules.
|
||||
|
||||
An example of this, with a new, empty, base directory.
|
||||
We can see from this output, that the directory is new.
|
||||
An example of this with a new, empty, base directory:
|
||||
|
||||
If we try to start the Jetty server with no configuration or modules enabled, it will promptly exit:
|
||||
|
||||
include::screen-empty-base.adoc[]
|
||||
|
||||
Lets see what the configuration looks like so far:
|
||||
By using the `--list-config` parameter to our startup command, we can see that there are no modules enabled and no Jetty XML files are active:
|
||||
|
||||
include::screen-empty-base-listconfig.adoc[]
|
||||
|
||||
Lets try adding some basic support for webapps, with automatic deploy (hot deploy), and a single basic HTTP/1.1 connector.
|
||||
Let's try adding some basic support for webapps, with automatic deploy (hot deploy), and a single basic HTTP/1.1 connector.
|
||||
|
||||
include::screen-http-webapp-deploy.adoc[]
|
||||
|
||||
This created the webapps directory in our `mybase` directory and appended the `start.ini` file with the ini template arguments from the associated module files.
|
||||
Additionally, where needed, Jetty enabled any module dependencies and added their module ini template properties.
|
||||
This creates the webapps directory in our `mybase` directory and appended the `start.ini` file with the ini template arguments from the associated module files.
|
||||
Additionally, where needed, Jetty enabled any module dependencies.
|
||||
|
||||
Lets see what it looks like configuration wise.
|
||||
Now that we have added some modules to our server, let's run `--list-config` again to review our new configuration.
|
||||
|
||||
include::screen-http-webapp-deploy-listconfig.adoc[]
|
||||
|
||||
|
|
|
@ -24,13 +24,274 @@ Configuring a connector is a combination of configuring the following:
|
|||
* Services the connector uses (for example: executors, schedulers).
|
||||
* Connection factories that instantiate and configure the protocol for an accepted connection.
|
||||
|
||||
Jetty primarily uses a single connector type called link:{JDURL}/org/eclipse/jetty/server/ServerConnector.html[ServerConnector].
|
||||
Typically connectors require very little configuration aside from setting the listening port, and enabling `X-Forwarded-For` customization when applicable.
|
||||
Additional settings, including constructing your own Jetty XML files, are for expert configuration only.
|
||||
|
||||
==== Enabling Connectors
|
||||
|
||||
Out of the box, Jetty provides several link:#startup-modules[modules] for enabling different types of connectors, from HTTP to HTTPS, HTTP/2, and others.
|
||||
If you startup Jetty with the `--list-modules=connector` command, you can see a list of all available connector modules:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase]$ java -jar $JETTY_HOME/start.jar --list-modules=connector
|
||||
|
||||
Available Modules:
|
||||
==================
|
||||
tags: [connector]
|
||||
|
||||
Modules for tag 'connector':
|
||||
----------------------------
|
||||
|
||||
Module: http
|
||||
: Enables a HTTP connector on the server.
|
||||
: By default HTTP/1 is support, but HTTP2C can
|
||||
: be added to the connector with the http2c module.
|
||||
Tags: connector, http
|
||||
Depend: server
|
||||
XML: etc/jetty-http.xml
|
||||
Enabled: ${jetty.base}/start.ini
|
||||
|
||||
Module: http-forwarded
|
||||
: Adds a forwarded request customizer to the HTTP Connector
|
||||
: to process forwarded-for style headers from a proxy.
|
||||
Tags: connector
|
||||
Depend: http
|
||||
XML: etc/jetty-http-forwarded.xml
|
||||
|
||||
Module: http2
|
||||
: Enables HTTP2 protocol support on the TLS(SSL) Connector,
|
||||
: using the ALPN extension to select which protocol to use.
|
||||
Tags: connector, http2, http, ssl
|
||||
Depend: ssl, alpn
|
||||
LIB: lib/http2/*.jar
|
||||
XML: etc/jetty-http2.xml
|
||||
|
||||
Module: http2c
|
||||
: Enables the HTTP2C protocol on the HTTP Connector
|
||||
: The connector will accept both HTTP/1 and HTTP/2 connections.
|
||||
Tags: connector, http2, http
|
||||
Depend: http
|
||||
LIB: lib/http2/*.jar
|
||||
XML: etc/jetty-http2c.xml
|
||||
|
||||
Module: https
|
||||
: Adds HTTPS protocol support to the TLS(SSL) Connector
|
||||
Tags: connector, https, http, ssl
|
||||
Depend: ssl
|
||||
Optional: http-forwarded, http2
|
||||
XML: etc/jetty-https.xml
|
||||
|
||||
Module: proxy-protocol-ssl
|
||||
: Enables the Proxy Protocol on the TLS(SSL) Connector.
|
||||
: http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
|
||||
: This allows a Proxy operating in TCP mode to transport
|
||||
: details of the proxied connection to the server.
|
||||
: Both V1 and V2 versions of the protocol are supported.
|
||||
Tags: connector, ssl
|
||||
Depend: ssl
|
||||
XML: etc/jetty-proxy-protocol-ssl.xml
|
||||
|
||||
Module: ssl
|
||||
: Enables a TLS(SSL) Connector on the server.
|
||||
: This may be used for HTTPS and/or HTTP2 by enabling
|
||||
: the associated support modules.
|
||||
Tags: connector, ssl
|
||||
Depend: server
|
||||
XML: etc/jetty-ssl.xml
|
||||
XML: etc/jetty-ssl-context.xml
|
||||
|
||||
Module: unixsocket
|
||||
: Enables a Unix Domain Socket Connector that can receive
|
||||
: requests from a local proxy and/or SSL offloader (eg haproxy) in either
|
||||
: HTTP or TCP mode. Unix Domain Sockets are more efficient than
|
||||
: localhost TCP/IP connections as they reduce data copies, avoid
|
||||
: needless fragmentation and have better dispatch behaviours.
|
||||
: When enabled with corresponding support modules, the connector can
|
||||
: accept HTTP, HTTPS or HTTP2C traffic.
|
||||
Tags: connector
|
||||
Depend: server
|
||||
LIB: lib/jetty-unixsocket-${jetty.version}.jar
|
||||
LIB: lib/jnr/*.jar
|
||||
XML: etc/jetty-unixsocket.xml
|
||||
|
||||
Module: unixsocket-forwarded
|
||||
: Adds a forwarded request customizer to the HTTP configuration used
|
||||
: by the Unix Domain Socket connector, for use when behind a proxy operating
|
||||
: in HTTP mode that adds forwarded-for style HTTP headers. Typically this
|
||||
: is an alternate to the Proxy Protocol used mostly for TCP mode.
|
||||
Tags: connector
|
||||
Depend: unixsocket-http
|
||||
XML: etc/jetty-unixsocket-forwarded.xml
|
||||
|
||||
Module: unixsocket-http
|
||||
: Adds a HTTP protocol support to the Unix Domain Socket connector.
|
||||
: It should be used when a proxy is forwarding either HTTP or decrypted
|
||||
: HTTPS traffic to the connector and may be used with the
|
||||
: unix-socket-http2c modules to upgrade to HTTP/2.
|
||||
Tags: connector, http
|
||||
Depend: unixsocket
|
||||
XML: etc/jetty-unixsocket-http.xml
|
||||
|
||||
Module: unixsocket-http2c
|
||||
: Adds a HTTP2C connetion factory to the Unix Domain Socket Connector
|
||||
: It can be used when either the proxy forwards direct
|
||||
: HTTP/2C (unecrypted) or decrypted HTTP/2 traffic.
|
||||
Tags: connector, http2
|
||||
Depend: unixsocket-http
|
||||
LIB: lib/http2/*.jar
|
||||
XML: etc/jetty-unixsocket-http2c.xml
|
||||
|
||||
Module: unixsocket-proxy-protocol
|
||||
: Enables the proxy protocol on the Unix Domain Socket Connector
|
||||
: http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
|
||||
: This allows information about the proxied connection to be
|
||||
: efficiently forwarded as the connection is accepted.
|
||||
: Both V1 and V2 versions of the protocol are supported and any
|
||||
: SSL properties may be interpreted by the unixsocket-secure
|
||||
: module to indicate secure HTTPS traffic. Typically this
|
||||
: is an alternate to the forwarded module.
|
||||
Tags: connector
|
||||
Depend: unixsocket
|
||||
XML: etc/jetty-unixsocket-proxy-protocol.xml
|
||||
|
||||
Module: unixsocket-secure
|
||||
: Enable a secure request customizer on the HTTP Configuration
|
||||
: used by the Unix Domain Socket Connector.
|
||||
: This looks for a secure scheme transported either by the
|
||||
: unixsocket-forwarded, unixsocket-proxy-protocol or in a
|
||||
: HTTP2 request.
|
||||
Tags: connector
|
||||
Depend: unixsocket-http
|
||||
XML: etc/jetty-unixsocket-secure.xml
|
||||
...
|
||||
....
|
||||
|
||||
To enable a connector, simply activate the associated module.
|
||||
Below is an example of activating both the `http` and `https` modules in a fresh link:#startup-base-and-home[Jetty base] using the link:#start-vs-startd[start.d directory]:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase] java -jar $JETTY_HOME/start.jar --create-startd
|
||||
MKDIR : ${jetty.base}/start.d
|
||||
INFO : Base directory was modified
|
||||
|
||||
[mybase] java -jar $JETTY_HOME/start.jar --add-to-start=http,https
|
||||
INFO : server transitively enabled, ini template available with --add-to-start=server
|
||||
INFO : http initialized in ${jetty.base}/start.d/http.ini
|
||||
INFO : https initialized in ${jetty.base}/start.d/https.ini
|
||||
INFO : ssl transitively enabled, ini template available with --add-to-start=ssl
|
||||
MKDIR : ${jetty.base}/etc
|
||||
COPY : ${jetty.home}/modules/ssl/keystore to ${jetty.base}/etc/keystore
|
||||
INFO : Base directory was modified
|
||||
[mybase] tree
|
||||
.
|
||||
├── etc
|
||||
│ └── keystore
|
||||
└── start.d
|
||||
├── http.ini
|
||||
└── https.ini
|
||||
....
|
||||
|
||||
When the `http` and `https` modules were activated, so too were any modules they depend on, in this case `server` and `ssl`, as well as any files those modules require, such as the `etc` directory and the `keystore` file for `ssl`.
|
||||
|
||||
At this point the server has been configured with connectors for both HTTP and HTTPS and can be started:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase] java -jar $JETTY_HOME/start.jar
|
||||
2017-08-31 10:19:58.855:INFO::main: Logging initialized @372ms to org.eclipse.jetty.util.log.StdErrLog
|
||||
2017-08-31 10:19:59.076:INFO:oejs.Server:main: jetty-9.4.6.v20170531
|
||||
2017-08-31 10:19:59.125:INFO:oejs.AbstractConnector:main: Started ServerConnector@421e98e0{HTTP/1.1,[http/1.1]}{0.0.0.0:8080}
|
||||
2017-08-31 10:19:59.150:INFO:oejus.SslContextFactory:main: x509=X509@5315b42e(jetty,h=[jetty.eclipse.org],w=[]) for SslContextFactory@2ef9b8bc(file:///Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase/etc/keystore,file:///Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase/etc/keystore)
|
||||
2017-08-31 10:19:59.151:INFO:oejus.SslContextFactory:main: x509=X509@5d624da6(mykey,h=[],w=[]) for SslContextFactory@2ef9b8bc(file:///Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase/etc/keystore,file:///Users/staff/installs/repository/jetty-distribution-{VERSION}/mybase/etc/keystore)
|
||||
2017-08-31 10:19:59.273:INFO:oejs.AbstractConnector:main: Started ServerConnector@2b98378d{SSL,[ssl, http/1.1]}{0.0.0.0:8443}
|
||||
2017-08-31 10:19:59.274:INFO:oejs.Server:main: Started @791ms
|
||||
....
|
||||
|
||||
When modules are enabled, they are loaded with several default options.
|
||||
These can be changed by editing the associated module ini file in the `start.d` directory (or the associated lines in `start.ini` if your implementation does not use `start.d`).
|
||||
For example, if we examine the `http.ini` file in our `start.d` directory created above, we will see the following settings:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
# ---------------------------------------
|
||||
# Module: http
|
||||
# Enables a HTTP connector on the server.
|
||||
# By default HTTP/1 is support, but HTTP2C can
|
||||
# be added to the connector with the http2c module.
|
||||
# ---------------------------------------
|
||||
--module=http
|
||||
|
||||
### HTTP Connector Configuration
|
||||
|
||||
## Connector host/address to bind to
|
||||
# jetty.http.host=0.0.0.0
|
||||
|
||||
## Connector port to listen on
|
||||
# jetty.http.port=8080
|
||||
|
||||
## Connector idle timeout in milliseconds
|
||||
# jetty.http.idleTimeout=30000
|
||||
|
||||
## Connector socket linger time in seconds (-1 to disable)
|
||||
# jetty.http.soLingerTime=-1
|
||||
|
||||
## Number of acceptors (-1 picks default based on number of cores)
|
||||
# jetty.http.acceptors=-1
|
||||
|
||||
## Number of selectors (-1 picks default based on number of cores)
|
||||
# jetty.http.selectors=-1
|
||||
|
||||
## ServerSocketChannel backlog (0 picks platform default)
|
||||
# jetty.http.acceptorQueueSize=0
|
||||
|
||||
## Thread priority delta to give to acceptor threads
|
||||
# jetty.http.acceptorPriorityDelta=0
|
||||
|
||||
## HTTP Compliance: RFC7230, RFC2616, LEGACY
|
||||
# jetty.http.compliance=RFC7230
|
||||
....
|
||||
|
||||
To make a change to these settings, uncomment the line (by removing the #) and change the property to the desired value.
|
||||
For example, if you wanted to change the HTTP port to 5231, you would edit the line as follows:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
...
|
||||
## Connector port to listen on
|
||||
jetty.http.port=5231
|
||||
...
|
||||
....
|
||||
|
||||
Now when the server is started, HTTP connections will be accepted on port 5231:
|
||||
|
||||
[source, screen, subs="{sub-order}"]
|
||||
....
|
||||
[mybase] java -jar ../start.jar
|
||||
2017-08-31 10:31:32.955:INFO::main: Logging initialized @366ms to org.eclipse.jetty.util.log.StdErrLog
|
||||
2017-08-31 10:31:33.109:INFO:oejs.Server:main: jetty-9.4.6.v20170531
|
||||
2017-08-31 10:31:33.146:INFO:oejs.AbstractConnector:main: Started ServerConnector@2ef9b8bc{HTTP/1.1,[http/1.1]}{0.0.0.0:5231}
|
||||
...
|
||||
2017-08-31 10:31:33.263:INFO:oejs.Server:main: Started @675ms
|
||||
....
|
||||
|
||||
Every module has its own set of configuration options, and reviewing them all is recommended.
|
||||
For additional information on the module system, please refer to our documentation on link:#startup-modules[Startup Modules].
|
||||
|
||||
____
|
||||
[NOTE]
|
||||
Editing these module files is the recommended way to edit the configuration of your server.
|
||||
Making changes to the associated Jetty XML file for connectors is *not* recommended, and is for advanced users only.
|
||||
If you do wish to edit Jetty XML, please see our section on managing link:#[Jetty Home and Jetty Base] to ensure your Jetty Home remains a standard of truth for your implementation.
|
||||
____
|
||||
|
||||
==== Advanced Configuration
|
||||
|
||||
Jetty primarily uses a single connector type called link:{JDURL}/org/eclipse/jetty/server/ServerConnector.html[ServerConnector].
|
||||
|
||||
Prior to Jetty 9, the type of the connector specified both the protocol and the implementation used; for example, selector-based non blocking I/O vs blocking I/O, or SSL connector vs non-SSL connector.
|
||||
Jetty 9 has a single selector-based non-blocking I/O connector, and a collection of link:{JDURL}/org/eclipse/jetty/server/ConnectionFactory.html[`ConnectionFactories`] now configure the protocol on the connector.
|
||||
____
|
||||
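In embedded code this separation is explicit: the protocols a connector speaks are determined entirely by the ordered list of `ConnectionFactory` instances it is given. The sketch below is illustrative only (it assumes the `jetty-server` and `http2-server` jars are on the classpath) and mirrors what the `http` and `http2c` modules configure in the distribution:

[source, java]
----
import org.eclipse.jetty.http2.server.HTTP2CServerConnectionFactory;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public class ConnectionFactoryExample
{
    public static void main(String[] args) throws Exception
    {
        Server server = new Server();
        HttpConfiguration config = new HttpConfiguration();

        // One connector, two connection factories: HTTP/1.1 plus cleartext
        // HTTP/2 (h2c). The protocol is configured by the factories, not by
        // the connector type.
        ServerConnector connector = new ServerConnector(server,
                new HttpConnectionFactory(config),
                new HTTP2CServerConnectionFactory(config));
        connector.setPort(8080);

        server.addConnector(connector);
        server.start();
        server.join();
    }
}
----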
|
||||
The standard Jetty distribution comes with the following Jetty XML files that create and configure connectors; you should examine them as you read this section:
|
||||
|
||||
|
@ -48,15 +309,6 @@ link:{GITBROWSEURL}/jetty-http2/http2-server/src/main/config/etc/jetty-http2.xml
|
|||
link:{GITBROWSEURL}/jetty-alpn/jetty-alpn-server/src/main/config/etc/jetty-alpn.xml[`jetty-alpn.xml`]::
|
||||
Adds an link:{JDURL}/org/eclipse/jetty/alpn/server/ALPNServerConnectionFactory.html[`ALPNServerConnectionFactory`] to the link:{JDURL}/org/eclipse/jetty/server/ServerConnector.html[`ServerConnector`] configured by `jetty-ssl.xml` which allows the one SSL connector to support multiple protocols with the ALPN extension used to select the protocol to be used for each connection.
|
||||
|
||||
Typically connectors require very little configuration aside from setting the listening port (see link:#jetty-connectors-network-settings[Network Settings]), and enabling `X-Forwarded-For` customization when applicable (see link:#jetty-connectors-http-configuration[HTTP Configuration]).
|
||||
Additional settings are for expert configuration only.
|
||||
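For embedded deployments, `X-Forwarded-For` style customization is applied by adding a `ForwardedRequestCustomizer` to the connector's `HttpConfiguration`; a minimal sketch (the programmatic analogue of enabling the `http-forwarded` module, with an arbitrary port):

[source, java]
----
import org.eclipse.jetty.server.ForwardedRequestCustomizer;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public final class ForwardedForConnector
{
    // Build a connector whose HttpConfiguration honours X-Forwarded-* headers.
    public static ServerConnector newConnector(Server server, int port)
    {
        HttpConfiguration config = new HttpConfiguration();
        config.addCustomizer(new ForwardedRequestCustomizer());
        ServerConnector connector =
                new ServerConnector(server, new HttpConnectionFactory(config));
        connector.setPort(port);
        return connector;
    }
}
----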
|
||||
____
|
||||
[NOTE]
|
||||
All the connectors discussed in this chapter can be enabled in the Jetty Distribution by enabling them via the module system.
|
||||
Please refer to our chapter on link:#startup-modules[Managing Startup Modules] for more information.
|
||||
____
|
||||
|
||||
==== Constructing a ServerConnector
|
||||
|
||||
The services a link:{JDURL}/org/eclipse/jetty/server/ServerConnector.html[`ServerConnector`] instance uses are set by constructor injection and once instantiated cannot be changed.
|
||||
|
@ -79,9 +331,9 @@ You can see the other arguments that can be passed when constructing a `ServerCo
|
|||
Typically the defaults are sufficient for almost all deployments.
|
||||
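For embedded deployments the same constructor injection can be seen directly; the sketch below is illustrative (the thread-pool size and the `-1` defaults are arbitrary, not recommendations):

[source, java]
----
import org.eclipse.jetty.io.ArrayByteBufferPool;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;

public class ConstructServerConnector
{
    public static void main(String[] args) throws Exception
    {
        QueuedThreadPool threadPool = new QueuedThreadPool(200);
        Server server = new Server(threadPool);

        // Every service the connector uses is supplied at construction time
        // and cannot be changed afterwards: executor, scheduler, buffer pool,
        // acceptor and selector counts (-1 picks a default based on cores),
        // and the ConnectionFactory chain.
        ServerConnector connector = new ServerConnector(
                server,
                threadPool,                        // executor
                new ScheduledExecutorScheduler(),  // scheduler
                new ArrayByteBufferPool(),         // byte buffer pool
                -1,                                // acceptors
                -1,                                // selectors
                new HttpConnectionFactory());
        connector.setPort(8080);

        server.addConnector(connector);
        server.start();
        server.join();
    }
}
----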
|
||||
[[jetty-connectors-network-settings]]
|
||||
==== Network Settings.
|
||||
==== Network Settings
|
||||
|
||||
You configure connector network settings by calling setters on the connector before it is started.
|
||||
You can configure connector network settings by calling setters on the connector before it is started.
|
||||
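In embedded code these settings are plain setter calls made before the connector (or server) is started; a minimal sketch, with arbitrary example values:

[source, java]
----
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public final class ConnectorNetworkSettings
{
    // Returns a connector with illustrative network settings applied.
    public static ServerConnector newConnector(Server server)
    {
        ServerConnector connector = new ServerConnector(server);
        connector.setHost("0.0.0.0");       // address to bind to
        connector.setPort(8080);            // listening port
        connector.setIdleTimeout(30000);    // idle timeout in milliseconds
        connector.setAcceptQueueSize(128);  // ServerSocketChannel backlog
        return connector;                   // add to the server before start()
    }
}
----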
For example, you can set the port with the Jetty XML:
|
||||
|
||||
[source, xml, subs="{sub-order}"]
|
||||
|
@ -107,7 +359,7 @@ Thus typically the port is set within Jetty XML, but uses the `Property` element
|
|||
</New>
|
||||
----
|
||||
|
||||
The network settings that you can set on the link:{JDURL}/org/eclipse/jetty/server/ServerConnector.html[`ServerConnector`] include:
|
||||
The network settings available for configuration on the link:{JDURL}/org/eclipse/jetty/server/ServerConnector.html[`ServerConnector`] include:
|
||||
|
||||
.Connector Configuration
|
||||
[width="100%",cols="22%,78%",options="header",]
|
||||
|
@ -238,13 +490,13 @@ These headers can be interpreted by an instance of link:{JDURL}/org/eclipse/jett
|
|||
|
||||
===== Proxy Protocol
|
||||
|
||||
The http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt[Proxy Protocol] is a de facto standard created by HAProxy and used by environments such as Amazon Elastic Cloud.
|
||||
The http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt[Proxy Protocol] is the _de facto_ standard created by HAProxy and used by environments such as Amazon Elastic Cloud.
|
||||
This mechanism is independent of any protocol, so it can be used for HTTP2, TLS etc.
|
||||
The information about the client connection is sent as a small data frame on each newly established connection.
|
||||
In Jetty, this protocol can be handled by the link:{JDURL}/org/eclipse/jetty/server/ProxyConnectionFactory.html[`ProxyConnectionFactory`] which parses the data frame and then instantiates the next `ConnectionFactory` on the connection with an end point that has been customized with the data obtained about the original client connection.
|
||||
The connection factory can be added to any link:{JDURL}/org/eclipse/jetty/server/ServerConnector.html[`ServerConnector`] and should be the first link:{JDURL}/org/eclipse/jetty/server/ConnectionFactory.html[`ConnectionFactory`].
|
||||
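For embedded usage, a minimal sketch of the equivalent arrangement in Java follows (the port is arbitrary); the `ProxyConnectionFactory` is listed first so it consumes the proxy data frame before the HTTP factory sees the connection:

[source, java]
----
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.ProxyConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public final class ProxyProtocolConnector
{
    public static ServerConnector newConnector(Server server, int port)
    {
        // ProxyConnectionFactory first, then the factory for the real protocol.
        ServerConnector connector = new ServerConnector(server,
                new ProxyConnectionFactory(),
                new HttpConnectionFactory());
        connector.setPort(port);
        return connector;
    }
}
----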
|
||||
An example of adding the factory to a HTTP connector is:
|
||||
An example of adding the factory to a HTTP connector is shown below:
|
||||
|
||||
[source, xml, subs="{sub-order}"]
|
||||
----
|
||||
|
|
|
@ -60,8 +60,8 @@ Unfortunately this approach denies all aliases, including symbolic links, which
|
|||
==== Serving Aliases and Symbolic Links
|
||||
|
||||
Not all aliases are bad nor should be seen as attempts to subvert security constraints.
|
||||
Specifically symbolic links can be very useful when assembling complex web applications, yet by default Jetty will not serve them.
|
||||
Thus Jetty contexts support an extensible `AliasCheck` mechanism to allow aliases resources to be inspected an conditionally served.
|
||||
Specifically, symbolic links can be very useful when assembling complex web applications.
|
||||
As such, Jetty contexts support an extensible `AliasCheck` mechanism to allow aliased resources to be inspected and conditionally served.
|
||||
In this way, "good" aliases can be detected and served.
|
||||
Jetty provides several utility implementations of the `AliasCheck` interface as nested classes of `ContextHandler`:
|
||||
|
||||
|
@ -70,6 +70,11 @@ ApproveAliases::
|
|||
AllowSymLinkAliasChecker::
|
||||
Approves aliases using the Java 7 `Files.readSymbolicLink(path)` and `Path.toRealPath(...)` APIs to check that aliases are valid symbolic links.
|
||||
|
||||
____
|
||||
[NOTE]
|
||||
By default, Jetty serves aliased files for implementations running on UNIX as Contexts are created with both the {JDURL}/org/eclipse/jetty/server/handler/AllowSymLinkAliasChecker.html[`AllowSymLinkAliasChecker`] and {JDURL}/org/eclipse/jetty/server/handler/ContextHandler.ApproveNonExistentDirectoryAliases.html[`ApproveNonExistentDirectoryAliases`] alias checkers.
|
||||
____
|
||||
|
||||
An application is free to implement its own Alias checking.
|
||||
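For embedded contexts an alias checker can also be added programmatically; a brief sketch, assuming an existing `WebAppContext` (any `ContextHandler` works the same way):

[source, java]
----
import org.eclipse.jetty.server.handler.AllowSymLinkAliasChecker;
import org.eclipse.jetty.webapp.WebAppContext;

public final class AliasCheckSetup
{
    // Allow resources reached through valid symbolic links to be served.
    public static void allowSymlinks(WebAppContext context)
    {
        context.addAliasCheck(new AllowSymLinkAliasChecker());
    }
}
----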
Alias Checkers can be installed in a context via the following XML used in a context deployer file or `WEB-INF/jetty-web.xml`:
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ The default system classes are:
|
|||
.Default System Classes
|
||||
[width="100%",cols="8%,92%",options="header",]
|
||||
|=======================================================================
|
||||
|System Classes
|
||||
|System Classes | Note
|
||||
|java. |Java SE classes (per servlet spec v2.5 / SRV.9.7.2).
|
||||
|javax. |Java SE classes (per servlet spec v2.5 / SRV.9.7.2).
|
||||
|org.xml. |Needed by javax.xml.
|
||||
|
@ -98,7 +98,7 @@ The default system classes are:
|
|||
|org.eclipse.jetty.servlet.DefaultServlet |Webapp can see and not change default servlet.
|
||||
|=======================================================================
|
||||
|
||||
Absolute classname can be passed, names ending with . are treated as packages names, and names starting with - are treated as negative matches and must be listed before any enclosing packages.
|
||||
Absolute class names can be passed; names ending with `.` are treated as package names, and names starting with `-` are treated as negative matches and must be listed before any enclosing packages.
|
||||
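For embedded deployments the same patterns can be supplied directly to a `WebAppContext`; the sketch below is illustrative only (the `com.example` entries are hypothetical, and setting the list this way replaces the defaults shown above rather than extending them):

[source, java]
----
import org.eclipse.jetty.webapp.WebAppContext;

public final class SystemClassesSetup
{
    // Illustrative only: "-" entries are negative matches and must be listed
    // before the enclosing package entry.
    public static void configure(WebAppContext context)
    {
        context.setSystemClasses(new String[]
        {
            "java.",                    // Java SE classes
            "javax.",
            "-com.example.internal.",   // hypothetical: hide this sub-package
            "com.example."              // hypothetical: expose the rest
        });
    }
}
----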
|
||||
[[setting-server-classes]]
|
||||
===== Setting Server Classes
|
||||
|
|
|
@ -129,13 +129,13 @@ xmlns:date="http://exslt.org/dates-and-times"
|
|||
|
||||
</td>
|
||||
<td style="width: 50%">
|
||||
<script type="text/javascript"> (function() {
|
||||
var cx = '016459005284625897022:obd4lsai2ds';
|
||||
<script>
|
||||
(function() {
|
||||
var cx = '005120552842603642412:peimxy9z8nu';
|
||||
var gcse = document.createElement('script');
|
||||
gcse.type = 'text/javascript';
|
||||
gcse.async = true;
|
||||
gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') +
|
||||
'//www.google.com/cse/cse.js?cx=' + cx;
|
||||
gcse.src = 'https://cse.google.com/cse.js?cx=' + cx;
|
||||
var s = document.getElementsByTagName('script')[0];
|
||||
s.parentNode.insertBefore(gcse, s);
|
||||
})();
|
||||
|
|
|
@ -26,6 +26,7 @@ import java.util.HashMap;
|
|||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.ListIterator;
|
||||
import java.util.Map;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.Set;
|
||||
|
@ -98,6 +99,12 @@ public class HttpFields implements Iterable<HttpField>
|
|||
{
|
||||
return new Itr();
|
||||
}
|
||||
|
||||
public ListIterator<HttpField> listIterator()
|
||||
{
|
||||
return new Itr();
|
||||
}
|
||||
|
||||
|
||||
public Stream<HttpField> stream()
|
||||
{
|
||||
|
@ -1063,7 +1070,7 @@ public class HttpFields implements Iterable<HttpField>
|
|||
}
|
||||
|
||||
|
||||
private class Itr implements Iterator<HttpField>
|
||||
private class Itr implements ListIterator<HttpField>
|
||||
{
|
||||
int _cursor; // index of next element to return
|
||||
int _last=-1;
|
||||
|
@ -1091,6 +1098,49 @@ public class HttpFields implements Iterable<HttpField>
|
|||
_cursor=_last;
|
||||
_last=-1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasPrevious()
|
||||
{
|
||||
return _cursor>0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HttpField previous()
|
||||
{
|
||||
if (_cursor == 0)
|
||||
throw new NoSuchElementException();
|
||||
return _fields[_last=--_cursor];
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nextIndex()
|
||||
{
|
||||
return _cursor+1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int previousIndex()
|
||||
{
|
||||
return _cursor-1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void set(HttpField field)
|
||||
{
|
||||
if (_last<0)
|
||||
throw new IllegalStateException();
|
||||
_fields[_last] = field;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void add(HttpField field)
|
||||
{
|
||||
_fields = Arrays.copyOf(_fields,_fields.length+1);
|
||||
System.arraycopy(_fields,_cursor,_fields,_cursor+1,_size++);
|
||||
_fields[_cursor++] = field;
|
||||
_last=-1;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -560,10 +560,6 @@ public class HttpGenerator
|
|||
for (int f=0;f<n;f++)
|
||||
{
|
||||
HttpField field = trailer.getField(f);
|
||||
String v = field.getValue();
|
||||
if (v==null || v.length()==0)
|
||||
continue; // rfc7230 does not allow no value
|
||||
|
||||
putTo(field,buffer);
|
||||
}
|
||||
|
||||
|
@ -662,10 +658,6 @@ public class HttpGenerator
|
|||
for (int f=0;f<n;f++)
|
||||
{
|
||||
HttpField field = fields.getField(f);
|
||||
String v = field.getValue();
|
||||
if (v==null || v.length()==0)
|
||||
continue; // rfc7230 does not allow no value
|
||||
|
||||
HttpHeader h = field.getHeader();
|
||||
if (h==null)
|
||||
putTo(field,header);
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.eclipse.jetty.http.HttpTokens.EndOfContent;
|
|||
import org.eclipse.jetty.util.ArrayTernaryTrie;
|
||||
import org.eclipse.jetty.util.ArrayTrie;
|
||||
import org.eclipse.jetty.util.BufferUtil;
|
||||
import org.eclipse.jetty.util.StringUtil;
|
||||
import org.eclipse.jetty.util.Trie;
|
||||
import org.eclipse.jetty.util.TypeUtil;
|
||||
import org.eclipse.jetty.util.Utf8StringBuilder;
|
||||
|
@ -618,8 +619,8 @@ public class HttpParser
|
|||
while (_state.ordinal()<State.HEADER.ordinal() && buffer.hasRemaining() && !handle)
|
||||
{
|
||||
// process each character
|
||||
byte ch=next(buffer);
|
||||
if (ch==0)
|
||||
byte b=next(buffer);
|
||||
if (b==0)
|
||||
break;
|
||||
|
||||
if (_maxHeaderBytes>0 && ++_headerBytes>_maxHeaderBytes)
|
||||
|
@ -642,7 +643,7 @@ public class HttpParser
|
|||
switch (_state)
|
||||
{
|
||||
case METHOD:
|
||||
if (ch == SPACE)
|
||||
if (b == SPACE)
|
||||
{
|
||||
_length=_string.length();
|
||||
_methodString=takeString();
|
||||
|
@ -651,19 +652,19 @@ public class HttpParser
|
|||
_methodString=legacyString(_methodString,method.asString());
|
||||
setState(State.SPACE1);
|
||||
}
|
||||
else if (ch < SPACE)
|
||||
else if (b < SPACE)
|
||||
{
|
||||
if (ch==LINE_FEED)
|
||||
if (b==LINE_FEED)
|
||||
throw new BadMessageException("No URI");
|
||||
else
|
||||
throw new IllegalCharacterException(_state,ch,buffer);
|
||||
throw new IllegalCharacterException(_state,b,buffer);
|
||||
}
|
||||
else
|
||||
_string.append((char)ch);
|
||||
_string.append((char)b);
|
||||
break;
|
||||
|
||||
case RESPONSE_VERSION:
|
||||
if (ch == HttpTokens.SPACE)
|
||||
if (b == HttpTokens.SPACE)
|
||||
{
|
||||
_length=_string.length();
|
||||
String version=takeString();
|
||||
|
@ -672,19 +673,19 @@ public class HttpParser
|
|||
throw new BadMessageException(HttpStatus.BAD_REQUEST_400,"Unknown Version");
|
||||
setState(State.SPACE1);
|
||||
}
|
||||
else if (ch < HttpTokens.SPACE)
|
||||
throw new IllegalCharacterException(_state,ch,buffer);
|
||||
else if (b < HttpTokens.SPACE)
|
||||
throw new IllegalCharacterException(_state,b,buffer);
|
||||
else
|
||||
_string.append((char)ch);
|
||||
_string.append((char)b);
|
||||
break;
|
||||
|
||||
case SPACE1:
|
||||
if (ch > HttpTokens.SPACE || ch<0)
|
||||
if (b > HttpTokens.SPACE || b<0)
|
||||
{
|
||||
if (_responseHandler!=null)
|
||||
{
|
||||
setState(State.STATUS);
|
||||
setResponseStatus(ch-'0');
|
||||
setResponseStatus(b-'0');
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -712,25 +713,25 @@ public class HttpParser
|
|||
buffer.position(i-buffer.arrayOffset());
|
||||
}
|
||||
else
|
||||
_uri.append(ch);
|
||||
_uri.append(b);
|
||||
}
|
||||
}
|
||||
else if (ch < HttpTokens.SPACE)
|
||||
else if (b < HttpTokens.SPACE)
|
||||
{
|
||||
throw new BadMessageException(HttpStatus.BAD_REQUEST_400,_requestHandler!=null?"No URI":"No Status");
|
||||
}
|
||||
break;
|
||||
|
||||
case STATUS:
|
||||
if (ch == HttpTokens.SPACE)
|
||||
if (b == HttpTokens.SPACE)
|
||||
{
|
||||
setState(State.SPACE2);
|
||||
}
|
||||
else if (ch>='0' && ch<='9')
|
||||
else if (b>='0' && b<='9')
|
||||
{
|
||||
_responseStatus=_responseStatus*10+(ch-'0');
|
||||
_responseStatus=_responseStatus*10+(b-'0');
|
||||
}
|
||||
else if (ch < HttpTokens.SPACE && ch>=0)
|
||||
else if (b < HttpTokens.SPACE && b>=0)
|
||||
{
|
||||
setState(State.HEADER);
|
||||
handle=_responseHandler.startResponse(_version, _responseStatus, null)||handle;
|
||||
|
@ -742,11 +743,11 @@ public class HttpParser
|
|||
break;
|
||||
|
||||
case URI:
|
||||
if (ch == HttpTokens.SPACE)
|
||||
if (b == HttpTokens.SPACE)
|
||||
{
|
||||
setState(State.SPACE2);
|
||||
}
|
||||
else if (ch < HttpTokens.SPACE && ch>=0)
|
||||
else if (b < HttpTokens.SPACE && b>=0)
|
||||
{
|
||||
// HTTP/0.9
|
||||
if (complianceViolation(RFC7230,"HTTP/0.9"))
|
||||
|
@ -758,15 +759,15 @@ public class HttpParser
|
|||
}
|
||||
else
|
||||
{
|
||||
_uri.append(ch);
|
||||
_uri.append(b);
|
||||
}
|
||||
break;
|
||||
|
||||
case SPACE2:
|
||||
if (ch > HttpTokens.SPACE)
|
||||
if (b > HttpTokens.SPACE)
|
||||
{
|
||||
_string.setLength(0);
|
||||
_string.append((char)ch);
|
||||
_string.append((char)b);
|
||||
if (_responseHandler!=null)
|
||||
{
|
||||
_length=1;
|
||||
|
@ -806,7 +807,7 @@ public class HttpParser
|
|||
}
|
||||
}
|
||||
}
|
||||
else if (ch == HttpTokens.LINE_FEED)
|
||||
else if (b == HttpTokens.LINE_FEED)
|
||||
{
|
||||
if (_responseHandler!=null)
|
||||
{
|
||||
|
@ -825,12 +826,12 @@ public class HttpParser
|
|||
handle= handleHeaderContentMessage() || handle;
|
||||
}
|
||||
}
|
||||
else if (ch<0)
|
||||
else if (b<0)
|
||||
throw new BadMessageException();
|
||||
break;
|
||||
|
||||
case REQUEST_VERSION:
|
||||
if (ch == HttpTokens.LINE_FEED)
|
||||
if (b == HttpTokens.LINE_FEED)
|
||||
{
|
||||
if (_version==null)
|
||||
{
|
||||
|
@ -852,25 +853,25 @@ public class HttpParser
|
|||
handle=_requestHandler.startRequest(_methodString,_uri.toString(), _version)||handle;
|
||||
continue;
|
||||
}
|
||||
else if (ch>=HttpTokens.SPACE)
|
||||
_string.append((char)ch);
|
||||
else if (b>=HttpTokens.SPACE)
|
||||
_string.append((char)b);
|
||||
else
|
||||
throw new BadMessageException();
|
||||
|
||||
break;
|
||||
|
||||
case REASON:
|
||||
if (ch == HttpTokens.LINE_FEED)
|
||||
if (b == HttpTokens.LINE_FEED)
|
||||
{
|
||||
String reason=takeString();
|
||||
setState(State.HEADER);
|
||||
handle=_responseHandler.startResponse(_version, _responseStatus, reason)||handle;
|
||||
continue;
|
||||
}
|
||||
else if (ch>=HttpTokens.SPACE)
|
||||
else if (b>=HttpTokens.SPACE || ((b<0) && (b>=-96)))
|
||||
{
|
||||
_string.append((char)ch);
|
||||
if (ch!=' '&&ch!='\t')
|
||||
_string.append((char)(0xff&b));
|
||||
if (b!=' '&&b!='\t')
|
||||
_length=_string.length();
|
||||
}
|
||||
else
|
||||
|
@ -1008,8 +1009,8 @@ public class HttpParser
|
|||
while ((_state==State.HEADER || _state==State.TRAILER) && buffer.hasRemaining())
|
||||
{
|
||||
// process each character
|
||||
byte ch=next(buffer);
|
||||
if (ch==0)
|
||||
byte b=next(buffer);
|
||||
if (b==0)
|
||||
break;
|
||||
|
||||
if (_maxHeaderBytes>0 && ++_headerBytes>_maxHeaderBytes)
|
||||
|
@ -1024,7 +1025,7 @@ public class HttpParser
|
|||
switch (_fieldState)
|
||||
{
|
||||
case FIELD:
|
||||
switch(ch)
|
||||
switch(b)
|
||||
{
|
||||
case HttpTokens.COLON:
|
||||
case HttpTokens.SPACE:
|
||||
|
@ -1127,7 +1128,7 @@ public class HttpParser
|
|||
default:
|
||||
{
|
||||
// now handle the ch
|
||||
if (ch<HttpTokens.SPACE)
|
||||
if (b<HttpTokens.SPACE)
|
||||
throw new BadMessageException();
|
||||
|
||||
// process previous header
|
||||
|
@ -1185,15 +1186,15 @@ public class HttpParser
|
|||
{
|
||||
// Header and value
|
||||
int pos=buffer.position()+n.length()+v.length()+1;
|
||||
byte b=buffer.get(pos);
|
||||
byte peek=buffer.get(pos);
|
||||
|
||||
if (b==HttpTokens.CARRIAGE_RETURN || b==HttpTokens.LINE_FEED)
|
||||
if (peek==HttpTokens.CARRIAGE_RETURN || peek==HttpTokens.LINE_FEED)
|
||||
{
|
||||
_field=field;
|
||||
_valueString=v;
|
||||
setState(FieldState.IN_VALUE);
|
||||
|
||||
if (b==HttpTokens.CARRIAGE_RETURN)
|
||||
if (peek==HttpTokens.CARRIAGE_RETURN)
|
||||
{
|
||||
_cr=true;
|
||||
buffer.position(pos+1);
|
||||
|
@ -1216,7 +1217,7 @@ public class HttpParser
|
|||
// New header
|
||||
setState(FieldState.IN_NAME);
|
||||
_string.setLength(0);
|
||||
_string.append((char)ch);
|
||||
_string.append((char)b);
|
||||
_length=1;
|
||||
|
||||
}
|
||||
|
@ -1224,7 +1225,7 @@ public class HttpParser
|
|||
break;
|
||||
|
||||
case IN_NAME:
|
||||
if (ch==HttpTokens.COLON)
|
||||
if (b==HttpTokens.COLON)
|
||||
{
|
||||
if (_headerString==null)
|
||||
{
|
||||
|
@ -1237,7 +1238,7 @@ public class HttpParser
|
|||
break;
|
||||
}
|
||||
|
||||
if (ch>HttpTokens.SPACE)
|
||||
if (b>HttpTokens.SPACE)
|
||||
{
|
||||
if (_header!=null)
|
||||
{
|
||||
|
@ -1246,13 +1247,13 @@ public class HttpParser
|
|||
_headerString=null;
|
||||
}
|
||||
|
||||
_string.append((char)ch);
|
||||
if (ch>HttpTokens.SPACE)
|
||||
_string.append((char)b);
|
||||
if (b>HttpTokens.SPACE)
|
||||
_length=_string.length();
|
||||
break;
|
||||
}
|
||||
|
||||
if (ch==HttpTokens.LINE_FEED && !complianceViolation(RFC7230,"name only header"))
|
||||
if (b==HttpTokens.LINE_FEED && !complianceViolation(RFC7230,"name only header"))
|
||||
{
|
||||
if (_headerString==null)
|
||||
{
|
||||
|
@ -1268,21 +1269,21 @@ public class HttpParser
|
|||
break;
|
||||
}
|
||||
|
||||
throw new IllegalCharacterException(_state,ch,buffer);
|
||||
throw new IllegalCharacterException(_state,b,buffer);
|
||||
|
||||
case VALUE:
|
||||
if (ch>HttpTokens.SPACE || ch<0)
|
||||
if (b>HttpTokens.SPACE || b<0)
|
||||
{
|
||||
_string.append((char)(0xff&ch));
|
||||
_string.append((char)(0xff&b));
|
||||
_length=_string.length();
|
||||
setState(FieldState.IN_VALUE);
|
||||
break;
|
||||
}
|
||||
|
||||
if (ch==HttpTokens.SPACE || ch==HttpTokens.TAB)
|
||||
if (b==HttpTokens.SPACE || b==HttpTokens.TAB)
|
||||
break;
|
||||
|
||||
if (ch==HttpTokens.LINE_FEED)
|
||||
if (b==HttpTokens.LINE_FEED)
|
||||
{
|
||||
_value=null;
|
||||
_string.setLength(0);
|
||||
|
@ -1292,10 +1293,10 @@ public class HttpParser
|
|||
setState(FieldState.FIELD);
|
||||
break;
|
||||
}
|
||||
throw new IllegalCharacterException(_state,ch,buffer);
|
||||
throw new IllegalCharacterException(_state,b,buffer);
|
||||
|
||||
case IN_VALUE:
|
||||
if (ch>=HttpTokens.SPACE || ch<0 || ch==HttpTokens.TAB)
|
||||
if (b>=HttpTokens.SPACE || b<0 || b==HttpTokens.TAB)
|
||||
{
|
||||
if (_valueString!=null)
|
||||
{
|
||||
|
@ -1303,13 +1304,13 @@ public class HttpParser
|
|||
_valueString=null;
|
||||
_field=null;
|
||||
}
|
||||
_string.append((char)(0xff&ch));
|
||||
if (ch>HttpTokens.SPACE || ch<0)
|
||||
_string.append((char)(0xff&b));
|
||||
if (b>HttpTokens.SPACE || b<0)
|
||||
_length=_string.length();
|
||||
break;
|
||||
}
|
||||
|
||||
if (ch==HttpTokens.LINE_FEED)
|
||||
if (b==HttpTokens.LINE_FEED)
|
||||
{
|
||||
if (_length > 0)
|
||||
{
|
||||
|
@ -1321,7 +1322,7 @@ public class HttpParser
|
|||
break;
|
||||
}
|
||||
|
||||
throw new IllegalCharacterException(_state,ch,buffer);
|
||||
throw new IllegalCharacterException(_state,b,buffer);
|
||||
|
||||
default:
|
||||
throw new IllegalStateException(_state.toString());
|
||||
|
|
|
@ -21,7 +21,9 @@ package org.eclipse.jetty.http;
|
|||
import java.nio.ByteBuffer;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.ListIterator;
|
||||
import java.util.Locale;
|
||||
import java.util.NoSuchElementException;
|
||||
|
||||
|
@ -31,6 +33,7 @@ import org.junit.Assert;
|
|||
import org.junit.Test;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNull;
|
||||
|
@ -628,4 +631,68 @@ public class HttpFieldsTest
|
|||
|
||||
assertFalse(header.containsKey("n11"));
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testIteration() throws Exception
|
||||
{
|
||||
HttpFields header = new HttpFields();
|
||||
Iterator<HttpField> i = header.iterator();
|
||||
assertThat(i.hasNext(),is(false));
|
||||
|
||||
header.put("name1", "valueA");
|
||||
header.put("name2", "valueB");
|
||||
header.add("name3", "valueC");
|
||||
|
||||
i = header.iterator();
|
||||
assertThat(i.hasNext(),is(true));
|
||||
assertThat(i.next().getName(),is("name1"));
|
||||
assertThat(i.next().getName(),is("name2"));
|
||||
i.remove();
|
||||
assertThat(i.next().getName(),is("name3"));
|
||||
assertThat(i.hasNext(),is(false));
|
||||
|
||||
i = header.iterator();
|
||||
assertThat(i.hasNext(),is(true));
|
||||
assertThat(i.next().getName(),is("name1"));
|
||||
assertThat(i.next().getName(),is("name3"));
|
||||
assertThat(i.hasNext(),is(false));
|
||||
|
||||
|
||||
ListIterator<HttpField> l = header.listIterator();
|
||||
assertThat(l.hasNext(),is(true));
|
||||
l.add(new HttpField("name0","value"));
|
||||
assertThat(l.hasNext(),is(true));
|
||||
assertThat(l.next().getName(),is("name1"));
|
||||
l.set(new HttpField("NAME1","value"));
|
||||
assertThat(l.hasNext(),is(true));
|
||||
assertThat(l.hasPrevious(),is(true));
|
||||
assertThat(l.previous().getName(),is("NAME1"));
|
||||
assertThat(l.hasNext(),is(true));
|
||||
assertThat(l.hasPrevious(),is(true));
|
||||
assertThat(l.previous().getName(),is("name0"));
|
||||
assertThat(l.hasNext(),is(true));
|
||||
assertThat(l.hasPrevious(),is(false));
|
||||
assertThat(l.next().getName(),is("name0"));
|
||||
assertThat(l.hasNext(),is(true));
|
||||
assertThat(l.hasPrevious(),is(true));
|
||||
assertThat(l.next().getName(),is("NAME1"));
|
||||
l.add(new HttpField("name2","value"));
|
||||
assertThat(l.next().getName(),is("name3"));
|
||||
assertThat(l.hasNext(),is(false));
|
||||
assertThat(l.hasPrevious(),is(true));
|
||||
l.add(new HttpField("name4","value"));
|
||||
assertThat(l.hasNext(),is(false));
|
||||
assertThat(l.hasPrevious(),is(true));
|
||||
assertThat(l.previous().getName(),is("name4"));
|
||||
|
||||
i = header.iterator();
|
||||
assertThat(i.hasNext(),is(true));
|
||||
assertThat(i.next().getName(),is("name0"));
|
||||
assertThat(i.next().getName(),is("NAME1"));
|
||||
assertThat(i.next().getName(),is("name2"));
|
||||
assertThat(i.next().getName(),is("name3"));
|
||||
assertThat(i.next().getName(),is("name4"));
|
||||
assertThat(i.hasNext(),is(false));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -79,6 +79,47 @@ public class HttpGeneratorClientTest
|
|||
Assert.assertThat(out, Matchers.not(Matchers.containsString("Content-Length")));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEmptyHeaders() throws Exception
|
||||
{
|
||||
ByteBuffer header=BufferUtil.allocate(2048);
|
||||
HttpGenerator gen = new HttpGenerator();
|
||||
|
||||
HttpGenerator.Result
|
||||
result=gen.generateRequest(null,null,null,null, true);
|
||||
Assert.assertEquals(HttpGenerator.Result.NEED_INFO, result);
|
||||
Assert.assertEquals(HttpGenerator.State.START, gen.getState());
|
||||
|
||||
Info info = new Info("GET","/index.html");
|
||||
info.getFields().add("Host","something");
|
||||
info.getFields().add("Null",null);
|
||||
info.getFields().add("Empty","");
|
||||
Assert.assertTrue(!gen.isChunking());
|
||||
|
||||
result=gen.generateRequest(info,null,null,null, true);
|
||||
Assert.assertEquals(HttpGenerator.Result.NEED_HEADER, result);
|
||||
Assert.assertEquals(HttpGenerator.State.START, gen.getState());
|
||||
|
||||
result=gen.generateRequest(info,header,null,null, true);
|
||||
Assert.assertEquals(HttpGenerator.Result.FLUSH, result);
|
||||
Assert.assertEquals(HttpGenerator.State.COMPLETING, gen.getState());
|
||||
Assert.assertTrue(!gen.isChunking());
|
||||
String out = BufferUtil.toString(header);
|
||||
BufferUtil.clear(header);
|
||||
|
||||
result=gen.generateResponse(null,false,null,null, null, false);
|
||||
Assert.assertEquals(HttpGenerator.Result.DONE, result);
|
||||
Assert.assertEquals(HttpGenerator.State.END, gen.getState());
|
||||
Assert.assertTrue(!gen.isChunking());
|
||||
|
||||
Assert.assertEquals(0, gen.getContentPrepared());
|
||||
Assert.assertThat(out, Matchers.containsString("GET /index.html HTTP/1.1"));
|
||||
Assert.assertThat(out, Matchers.not(Matchers.containsString("Content-Length")));
|
||||
Assert.assertThat(out, Matchers.containsString("Empty:"));
|
||||
Assert.assertThat(out, Matchers.not(Matchers.containsString("Null:")));
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testPOSTRequestNoContent() throws Exception
|
||||
{
|
||||
|
|
|
@ -155,7 +155,7 @@ public class HttpGeneratorServerTest
|
|||
assertEquals(HttpGenerator.Result.NEED_INFO, result);
|
||||
assertEquals(HttpGenerator.State.START, gen.getState());
|
||||
|
||||
MetaData.Response info = new MetaData.Response(HttpVersion.HTTP_1_1, 200, null, new HttpFields(), 10);
|
||||
MetaData.Response info = new MetaData.Response(HttpVersion.HTTP_1_1, 200, "ØÆ", new HttpFields(), 10);
|
||||
info.getFields().add("Content-Type", "test/data;\r\nextra=value");
|
||||
info.getFields().add("Last-Modified", DateGenerator.__01Jan1970);
|
||||
|
||||
|
@ -176,7 +176,7 @@ public class HttpGeneratorServerTest
|
|||
|
||||
assertEquals(10, gen.getContentPrepared());
|
||||
|
||||
assertThat(response, containsString("HTTP/1.1 200 OK"));
|
||||
assertThat(response, containsString("HTTP/1.1 200 ØÆ"));
|
||||
assertThat(response, containsString("Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT"));
|
||||
assertThat(response, containsString("Content-Type: test/data; extra=value"));
|
||||
assertThat(response, containsString("Content-Length: 10"));
|
||||
|
|
|
@ -573,7 +573,9 @@ public class HttpParserTest
|
|||
BufferUtil.put(BufferUtil.toBuffer(" HTTP/1.0\r\n"), buffer);
|
||||
BufferUtil.put(BufferUtil.toBuffer("Header1: "), buffer);
|
||||
buffer.put("\u00e6 \u00e6".getBytes(StandardCharsets.ISO_8859_1));
|
||||
BufferUtil.put(BufferUtil.toBuffer(" \r\n\r\n"), buffer);
|
||||
BufferUtil.put(BufferUtil.toBuffer(" \r\nHeader2: "), buffer);
|
||||
buffer.put((byte)-1);
|
||||
BufferUtil.put(BufferUtil.toBuffer("\r\n\r\n"), buffer);
|
||||
BufferUtil.flipToFlush(buffer, 0);
|
||||
|
||||
HttpParser.RequestHandler handler = new Handler();
|
||||
|
@ -585,7 +587,9 @@ public class HttpParserTest
|
|||
Assert.assertEquals("HTTP/1.0", _versionOrReason);
|
||||
Assert.assertEquals("Header1", _hdr[0]);
|
||||
Assert.assertEquals("\u00e6 \u00e6", _val[0]);
|
||||
Assert.assertEquals(0, _headers);
|
||||
Assert.assertEquals("Header2", _hdr[1]);
|
||||
Assert.assertEquals(""+(char)255, _val[1]);
|
||||
Assert.assertEquals(1, _headers);
|
||||
Assert.assertEquals(null, _bad);
|
||||
}
|
||||
|
||||
|
@ -1304,6 +1308,22 @@ public class HttpParserTest
|
|||
Assert.assertTrue(_messageCompleted);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testResponseReasonIso8859_1() throws Exception
|
||||
{
|
||||
ByteBuffer buffer = BufferUtil.toBuffer(
|
||||
"HTTP/1.1 302 déplacé temporairement\r\n"
|
||||
+ "Content-Length: 0\r\n"
|
||||
+ "\r\n",StandardCharsets.ISO_8859_1);
|
||||
|
||||
HttpParser.ResponseHandler handler = new Handler();
|
||||
HttpParser parser = new HttpParser(handler);
|
||||
parser.parseNext(buffer);
|
||||
Assert.assertEquals("HTTP/1.1", _methodOrVersion);
|
||||
Assert.assertEquals("302", _uriOrStatus);
|
||||
Assert.assertEquals("déplacé temporairement", _versionOrReason);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSeekEOF() throws Exception
|
||||
{
|
||||
|
|
|
@ -47,7 +47,6 @@ public class HTTP2ClientConnectionFactory implements ClientConnectionFactory
|
|||
public static final String CLIENT_CONTEXT_KEY = "http2.client";
|
||||
public static final String BYTE_BUFFER_POOL_CONTEXT_KEY = "http2.client.byteBufferPool";
|
||||
public static final String EXECUTOR_CONTEXT_KEY = "http2.client.executor";
|
||||
public static final String PREALLOCATED_EXECUTOR_CONTEXT_KEY = "http2.client.preallocatedExecutor";
|
||||
public static final String SCHEDULER_CONTEXT_KEY = "http2.client.scheduler";
|
||||
public static final String SESSION_LISTENER_CONTEXT_KEY = "http2.client.sessionListener";
|
||||
public static final String SESSION_PROMISE_CONTEXT_KEY = "http2.client.sessionPromise";
|
||||
|
@ -60,7 +59,6 @@ public class HTTP2ClientConnectionFactory implements ClientConnectionFactory
|
|||
HTTP2Client client = (HTTP2Client)context.get(CLIENT_CONTEXT_KEY);
|
||||
ByteBufferPool byteBufferPool = (ByteBufferPool)context.get(BYTE_BUFFER_POOL_CONTEXT_KEY);
|
||||
Executor executor = (Executor)context.get(EXECUTOR_CONTEXT_KEY);
|
||||
ReservedThreadExecutor preallocatedExecutor = (ReservedThreadExecutor)context.get(PREALLOCATED_EXECUTOR_CONTEXT_KEY);
|
||||
Scheduler scheduler = (Scheduler)context.get(SCHEDULER_CONTEXT_KEY);
|
||||
Session.Listener listener = (Session.Listener)context.get(SESSION_LISTENER_CONTEXT_KEY);
|
||||
@SuppressWarnings("unchecked")
|
||||
|
@ -71,37 +69,29 @@ public class HTTP2ClientConnectionFactory implements ClientConnectionFactory
|
|||
HTTP2ClientSession session = new HTTP2ClientSession(scheduler, endPoint, generator, listener, flowControl);
|
||||
Parser parser = new Parser(byteBufferPool, session, 4096, 8192);
|
||||
|
||||
if (preallocatedExecutor==null)
|
||||
{
|
||||
// TODO move this to non lazy construction
|
||||
preallocatedExecutor=client.getBean(ReservedThreadExecutor.class);
|
||||
if (preallocatedExecutor==null)
|
||||
{
|
||||
synchronized (this)
|
||||
{
|
||||
if (preallocatedExecutor==null)
|
||||
{
|
||||
try
|
||||
{
|
||||
preallocatedExecutor = new ReservedThreadExecutor(executor,1); // TODO configure size
|
||||
preallocatedExecutor.start();
|
||||
client.addBean(preallocatedExecutor,true);
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
HTTP2ClientConnection connection = new HTTP2ClientConnection(client, byteBufferPool, preallocatedExecutor, endPoint,
|
||||
ReservedThreadExecutor reservedExecutor = provideReservedThreadExecutor(client, executor);
|
||||
|
||||
HTTP2ClientConnection connection = new HTTP2ClientConnection(client, byteBufferPool, reservedExecutor, endPoint,
|
||||
parser, session, client.getInputBufferSize(), promise, listener);
|
||||
connection.addListener(connectionListener);
|
||||
return customize(connection, context);
|
||||
}
|
||||
|
||||
protected ReservedThreadExecutor provideReservedThreadExecutor(HTTP2Client client, Executor executor)
|
||||
{
|
||||
synchronized (this)
|
||||
{
|
||||
ReservedThreadExecutor reservedExecutor = client.getBean(ReservedThreadExecutor.class);
|
||||
if (reservedExecutor == null)
|
||||
{
|
||||
// TODO: see HTTP2Connection.FillableCallback
|
||||
reservedExecutor = new ReservedThreadExecutor(executor, 0);
|
||||
client.addManaged(reservedExecutor);
|
||||
}
|
||||
return reservedExecutor;
|
||||
}
|
||||
}
|
||||
|
||||
private class HTTP2ClientConnection extends HTTP2Connection implements Callback
|
||||
{
|
||||
private final HTTP2Client client;
|
||||
|
|
|
@ -58,7 +58,6 @@ public class HTTP2Connection extends AbstractConnection
|
|||
this.session = session;
|
||||
this.bufferSize = bufferSize;
|
||||
this.strategy = new EatWhatYouKill(producer, executor.getExecutor(), executor);
|
||||
|
||||
LifeCycle.start(strategy);
|
||||
}
|
||||
|
||||
|
@ -274,6 +273,8 @@ public class HTTP2Connection extends AbstractConnection
|
|||
@Override
|
||||
public InvocationType getInvocationType()
|
||||
{
|
||||
// TODO: see also AbstractHTTP2ServerConnectionFactory.reservedThreads.
|
||||
// TODO: it's non blocking here because reservedThreads=0.
|
||||
return InvocationType.NON_BLOCKING;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -154,6 +154,7 @@ public abstract class AbstractHTTP2ServerConnectionFactory extends AbstractConne
|
|||
|
||||
public void setReservedThreads(int threads)
|
||||
{
|
||||
// TODO: see also HTTP2Connection.FillableCallback.
|
||||
// TODO: currently disabled since the only value that works is 0.
|
||||
// this.reservedThreads = threads;
|
||||
}
|
||||
|
@ -182,29 +183,8 @@ public abstract class AbstractHTTP2ServerConnectionFactory extends AbstractConne
|
|||
streamIdleTimeout = endPoint.getIdleTimeout();
|
||||
session.setStreamIdleTimeout(streamIdleTimeout);
|
||||
session.setInitialSessionRecvWindow(getInitialSessionRecvWindow());
|
||||
|
||||
ReservedThreadExecutor executor = connector.getBean(ReservedThreadExecutor.class);
|
||||
if (executor==null)
|
||||
{
|
||||
synchronized (this)
|
||||
{
|
||||
executor = connector.getBean(ReservedThreadExecutor.class);
|
||||
if (executor==null)
|
||||
{
|
||||
|
||||
try
|
||||
{
|
||||
executor = new ReservedThreadExecutor(connector.getExecutor(), getReservedThreads());
|
||||
executor.start();
|
||||
connector.addBean(executor,true);
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ReservedThreadExecutor executor = provideReservedThreadExecutor(connector);
|
||||
|
||||
ServerParser parser = newServerParser(connector, session);
|
||||
HTTP2Connection connection = new HTTP2ServerConnection(connector.getByteBufferPool(), executor,
|
||||
|
@ -213,6 +193,20 @@ public abstract class AbstractHTTP2ServerConnectionFactory extends AbstractConne
|
|||
return configure(connection, connector, endPoint);
|
||||
}
|
||||
|
||||
protected ReservedThreadExecutor provideReservedThreadExecutor(Connector connector)
|
||||
{
|
||||
synchronized (this)
|
||||
{
|
||||
ReservedThreadExecutor executor = getBean(ReservedThreadExecutor.class);
|
||||
if (executor == null)
|
||||
{
|
||||
executor = new ReservedThreadExecutor(connector.getExecutor(), getReservedThreads());
|
||||
addManaged(executor);
|
||||
}
|
||||
return executor;
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract ServerSessionListener newSessionListener(Connector connector, EndPoint endPoint);
|
||||
|
||||
protected ServerParser newServerParser(Connector connector, ServerParser.Listener listener)
|
||||
|
|
|
@ -79,6 +79,11 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
|
|||
setStopTimeout(5000);
|
||||
}
|
||||
|
||||
public Selector getSelector()
|
||||
{
|
||||
return _selector;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() throws Exception
|
||||
{
|
||||
|
@ -140,6 +145,116 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
|
|||
selector.wakeup();
|
||||
}
|
||||
|
||||
private Runnable processConnect(SelectionKey key, final Connect connect)
|
||||
{
|
||||
SelectableChannel channel = key.channel();
|
||||
try
|
||||
{
|
||||
key.attach(connect.attachment);
|
||||
boolean connected = _selectorManager.doFinishConnect(channel);
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Connected {} {}", connected, channel);
|
||||
if (connected)
|
||||
{
|
||||
if (connect.timeout.cancel())
|
||||
{
|
||||
key.interestOps(0);
|
||||
return new CreateEndPoint(channel, key)
|
||||
{
|
||||
@Override
|
||||
protected void failed(Throwable failure)
|
||||
{
|
||||
super.failed(failure);
|
||||
connect.failed(failure);
|
||||
}
|
||||
};
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new SocketTimeoutException("Concurrent Connect Timeout");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new ConnectException();
|
||||
}
|
||||
}
|
||||
catch (Throwable x)
|
||||
{
|
||||
connect.failed(x);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private void closeNoExceptions(Closeable closeable)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (closeable != null)
|
||||
closeable.close();
|
||||
}
|
||||
catch (Throwable x)
|
||||
{
|
||||
LOG.ignore(x);
|
||||
}
|
||||
}
|
||||
|
||||
private EndPoint createEndPoint(SelectableChannel channel, SelectionKey selectionKey) throws IOException
|
||||
{
|
||||
EndPoint endPoint = _selectorManager.newEndPoint(channel, this, selectionKey);
|
||||
endPoint.onOpen();
|
||||
_selectorManager.endPointOpened(endPoint);
|
||||
Connection connection = _selectorManager.newConnection(channel, endPoint, selectionKey.attachment());
|
||||
endPoint.setConnection(connection);
|
||||
selectionKey.attach(endPoint);
|
||||
_selectorManager.connectionOpened(connection);
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Created {}", endPoint);
|
||||
return endPoint;
|
||||
}
|
||||
|
||||
public void destroyEndPoint(final EndPoint endPoint)
|
||||
{
|
||||
submit(new DestroyEndPoint(endPoint));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String dump()
|
||||
{
|
||||
super.dump();
|
||||
return ContainerLifeCycle.dump(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void dump(Appendable out, String indent) throws IOException
|
||||
{
|
||||
Selector selector = _selector;
|
||||
if (selector == null || !selector.isOpen())
|
||||
dumpBeans(out, indent);
|
||||
else
|
||||
{
|
||||
final ArrayList<Object> dump = new ArrayList<>(selector.keys().size() * 2);
|
||||
DumpKeys dumpKeys = new DumpKeys(dump);
|
||||
submit(dumpKeys);
|
||||
dumpKeys.await(5, TimeUnit.SECONDS);
|
||||
if (dump.isEmpty())
|
||||
dumpBeans(out, indent);
|
||||
else
|
||||
dumpBeans(out, indent, dump);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
Selector selector = _selector;
|
||||
return String.format("%s id=%s keys=%d selected=%d",
|
||||
super.toString(),
|
||||
_id,
|
||||
selector != null && selector.isOpen() ? selector.keys().size() : -1,
|
||||
selector != null && selector.isOpen() ? selector.selectedKeys().size() : -1);
|
||||
}
|
||||
|
||||
/**
|
||||
* A {@link Selectable} is an {@link EndPoint} that wish to be
|
||||
* notified of non-blocking events by the {@link ManagedSelector}.
|
||||
|
@ -161,7 +276,6 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
|
|||
void updateKey();
|
||||
}
|
||||
|
||||
|
||||
private class SelectorProducer implements ExecutionStrategy.Producer
|
||||
{
|
||||
private Set<SelectionKey> _keys = Collections.emptySet();
|
||||
|
@ -331,7 +445,7 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
|
|||
return String.format("%s@%x", getClass().getSimpleName(), hashCode());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private abstract static class NonBlockingAction implements Runnable, Invocable
|
||||
{
|
||||
@Override
|
||||
|
@ -341,124 +455,6 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
|
|||
}
|
||||
}
|
||||
|
||||
private Runnable processConnect(SelectionKey key, final Connect connect)
|
||||
{
|
||||
SelectableChannel channel = key.channel();
|
||||
try
|
||||
{
|
||||
key.attach(connect.attachment);
|
||||
boolean connected = _selectorManager.doFinishConnect(channel);
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Connected {} {}", connected, channel);
|
||||
if (connected)
|
||||
{
|
||||
if (connect.timeout.cancel())
|
||||
{
|
||||
key.interestOps(0);
|
||||
return new CreateEndPoint(channel, key)
|
||||
{
|
||||
@Override
|
||||
protected void failed(Throwable failure)
|
||||
{
|
||||
super.failed(failure);
|
||||
connect.failed(failure);
|
||||
}
|
||||
};
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new SocketTimeoutException("Concurrent Connect Timeout");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
throw new ConnectException();
|
||||
}
|
||||
}
|
||||
catch (Throwable x)
|
||||
{
|
||||
connect.failed(x);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private void closeNoExceptions(Closeable closeable)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (closeable != null)
|
||||
closeable.close();
|
||||
}
|
||||
catch (Throwable x)
|
||||
{
|
||||
LOG.ignore(x);
|
||||
}
|
||||
}
|
||||
|
||||
private EndPoint createEndPoint(SelectableChannel channel, SelectionKey selectionKey) throws IOException
|
||||
{
|
||||
EndPoint endPoint = _selectorManager.newEndPoint(channel, this, selectionKey);
|
||||
endPoint.onOpen();
|
||||
_selectorManager.endPointOpened(endPoint);
|
||||
Connection connection = _selectorManager.newConnection(channel, endPoint, selectionKey.attachment());
|
||||
endPoint.setConnection(connection);
|
||||
selectionKey.attach(endPoint);
|
||||
_selectorManager.connectionOpened(connection);
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Created {}", endPoint);
|
||||
return endPoint;
|
||||
}
|
||||
|
||||
public void destroyEndPoint(final EndPoint endPoint)
|
||||
{
|
||||
final Connection connection = endPoint.getConnection();
|
||||
submit(() ->
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Destroyed {}", endPoint);
|
||||
if (connection != null)
|
||||
_selectorManager.connectionClosed(connection);
|
||||
_selectorManager.endPointClosed(endPoint);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public String dump()
|
||||
{
|
||||
super.dump();
|
||||
return ContainerLifeCycle.dump(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void dump(Appendable out, String indent) throws IOException
|
||||
{
|
||||
Selector selector = _selector;
|
||||
if (selector == null || !selector.isOpen())
|
||||
dumpBeans(out, indent);
|
||||
else
|
||||
{
|
||||
final ArrayList<Object> dump = new ArrayList<>(selector.keys().size() * 2);
|
||||
DumpKeys dumpKeys = new DumpKeys(dump);
|
||||
submit(dumpKeys);
|
||||
dumpKeys.await(5, TimeUnit.SECONDS);
|
||||
if (dump.isEmpty())
|
||||
dumpBeans(out, indent);
|
||||
else
|
||||
dumpBeans(out, indent, dump);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
Selector selector = _selector;
|
||||
return String.format("%s id=%s keys=%d selected=%d",
|
||||
super.toString(),
|
||||
_id,
|
||||
selector != null && selector.isOpen() ? selector.keys().size() : -1,
|
||||
selector != null && selector.isOpen() ? selector.selectedKeys().size() : -1);
|
||||
}
|
||||
|
||||
private class DumpKeys implements Runnable
|
||||
{
|
||||
private final CountDownLatch latch = new CountDownLatch(1);
|
||||
|
@@ -523,8 +519,7 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
                if (_key==null)
                {
                    _key = _channel.register(_selector, SelectionKey.OP_ACCEPT, this);
                }
            }

            if (LOG.isDebugEnabled())
                LOG.debug("{} acceptor={}", this, _key);

@@ -609,7 +604,7 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
            }
        }

    private class CreateEndPoint implements Runnable, Closeable
    private class CreateEndPoint extends NonBlockingAction implements Closeable
    {
        private final SelectableChannel channel;
        private final SelectionKey key;

@@ -800,8 +795,24 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
        }
    }

    public Selector getSelector()
    private class DestroyEndPoint extends NonBlockingAction
    {
        return _selector;
        private final EndPoint endPoint;

        public DestroyEndPoint(EndPoint endPoint)
        {
            this.endPoint = endPoint;
        }

        @Override
        public void run()
        {
            if (LOG.isDebugEnabled())
                LOG.debug("Destroyed {}", endPoint);
            Connection connection = endPoint.getConnection();
            if (connection != null)
                _selectorManager.connectionClosed(connection);
            _selectorManager.endPointClosed(endPoint);
        }
    }
}

@@ -1168,7 +1168,7 @@ public class SslConnection extends AbstractConnection
        @Override
        public boolean isInputShutdown()
        {
            return _sslEngine.isInboundDone();
            return getEndPoint().isInputShutdown() || _sslEngine.isInboundDone();
        }

        private void notifyHandshakeSucceeded(SSLEngine sslEngine)

@ -178,11 +178,11 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
|
||||
private DirContext _rootContext;
|
||||
|
||||
|
||||
|
||||
public class LDAPUserInfo extends UserInfo
|
||||
{
|
||||
Attributes attributes;
|
||||
|
||||
Attributes attributes;
|
||||
|
||||
/**
|
||||
* @param userName
|
||||
* @param credential
|
||||
|
@ -198,10 +198,10 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
{
|
||||
return getUserRoles(_rootContext, getUserName(), attributes);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* get the available information about the user
|
||||
* <p>
|
||||
|
@ -216,7 +216,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
*/
|
||||
public UserInfo getUserInfo(String username) throws Exception
|
||||
{
|
||||
Attributes attributes = getUserAttributes(username);
|
||||
Attributes attributes = getUserAttributes(username);
|
||||
String pwdCredential = getUserCredentials(attributes);
|
||||
|
||||
if (pwdCredential == null)
|
||||
|
@ -271,30 +271,32 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
*/
|
||||
private Attributes getUserAttributes(String username) throws LoginException
|
||||
{
|
||||
Attributes attributes = null;
|
||||
Attributes attributes = null;
|
||||
|
||||
SearchResult result;
|
||||
try {
|
||||
result = findUser(username);
|
||||
attributes = result.getAttributes();
|
||||
}
|
||||
catch (NamingException e) {
|
||||
SearchResult result;
|
||||
try
|
||||
{
|
||||
result = findUser(username);
|
||||
attributes = result.getAttributes();
|
||||
}
|
||||
catch (NamingException e)
|
||||
{
|
||||
throw new LoginException("Root context binding failure.");
|
||||
}
|
||||
|
||||
return attributes;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return attributes;
|
||||
}
|
||||
|
||||
private String getUserCredentials(Attributes attributes) throws LoginException
|
||||
{
|
||||
String ldapCredential = null;
|
||||
String ldapCredential = null;
|
||||
|
||||
Attribute attribute = attributes.get(_userPasswordAttribute);
|
||||
if (attribute != null)
|
||||
{
|
||||
try
|
||||
{
|
||||
byte[] value = (byte[]) attribute.get();
|
||||
byte[] value = (byte[])attribute.get();
|
||||
|
||||
ldapCredential = new String(value);
|
||||
}
|
||||
|
@ -323,16 +325,16 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
{
|
||||
String rdnValue = username;
|
||||
Attribute attribute = attributes.get(_userRdnAttribute);
|
||||
if (attribute != null)
|
||||
{
|
||||
try
|
||||
{
|
||||
rdnValue = (String) attribute.get(); // switch to the value stored in the _userRdnAttribute if we can
|
||||
}
|
||||
catch (NamingException e)
|
||||
{
|
||||
}
|
||||
}
|
||||
if (attribute != null)
|
||||
{
|
||||
try
|
||||
{
|
||||
rdnValue = (String)attribute.get(); // switch to the value stored in the _userRdnAttribute if we can
|
||||
}
|
||||
catch (NamingException e)
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
String userDn = _userRdnAttribute + "=" + rdnValue + "," + _userBaseDn;
|
||||
|
||||
|
@ -361,7 +363,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
|
||||
while (results.hasMoreElements())
|
||||
{
|
||||
SearchResult result = (SearchResult) results.nextElement();
|
||||
SearchResult result = (SearchResult)results.nextElement();
|
||||
|
||||
Attributes attributes = result.getAttributes();
|
||||
|
||||
|
@ -410,8 +412,8 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
Callback[] callbacks = configureCallbacks();
|
||||
getCallbackHandler().handle(callbacks);
|
||||
|
||||
String webUserName = ((NameCallback) callbacks[0]).getName();
|
||||
Object webCredential = ((ObjectCallback) callbacks[1]).getObject();
|
||||
String webUserName = ((NameCallback)callbacks[0]).getName();
|
||||
Object webCredential = ((ObjectCallback)callbacks[1]).getObject();
|
||||
|
||||
if (webUserName == null || webCredential == null)
|
||||
{
|
||||
|
@ -424,8 +426,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
if (_forceBindingLogin)
|
||||
{
|
||||
authed = bindingLogin(webUserName, webCredential);
|
||||
}
|
||||
else
|
||||
} else
|
||||
{
|
||||
// This sets read and the credential
|
||||
UserInfo userInfo = getUserInfo(webUserName);
|
||||
|
@ -439,7 +440,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
setCurrentUser(new JAASUserInfo(userInfo));
|
||||
|
||||
if (webCredential instanceof String)
|
||||
authed = credentialLogin(Credential.getCredential((String) webCredential));
|
||||
authed = credentialLogin(Credential.getCredential((String)webCredential));
|
||||
else
|
||||
authed = credentialLogin(webCredential);
|
||||
}
|
||||
|
@ -494,7 +495,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
* @param username the user name
|
||||
* @param password the password
|
||||
* @return true always
|
||||
* @throws LoginException if unable to bind the login
|
||||
* @throws LoginException if unable to bind the login
|
||||
* @throws NamingException if failure to bind login
|
||||
*/
|
||||
public boolean bindingLogin(String username, Object password) throws LoginException, NamingException
|
||||
|
@ -505,15 +506,15 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
|
||||
LOG.info("Attempting authentication: " + userDn);
|
||||
|
||||
Hashtable<Object,Object> environment = getEnvironment();
|
||||
Hashtable<Object, Object> environment = getEnvironment();
|
||||
|
||||
if ( userDn == null || "".equals(userDn) )
|
||||
if (userDn == null || "".equals(userDn))
|
||||
{
|
||||
throw new NamingException("username may not be empty");
|
||||
}
|
||||
environment.put(Context.SECURITY_PRINCIPAL, userDn);
|
||||
// RFC 4513 section 6.3.1, protect against ldap server implementations that allow successful binding on empty passwords
|
||||
if ( password == null || "".equals(password))
|
||||
if (password == null || "".equals(password))
|
||||
{
|
||||
throw new NamingException("password may not be empty");
|
||||
}
|
||||
|
@ -542,9 +543,9 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
LOG.debug("Searching for user " + username + " with filter: \'" + filter + "\'" + " from base dn: " + _userBaseDn);
|
||||
|
||||
Object[] filterArguments = new Object[]{
|
||||
_userObjectClass,
|
||||
_userIdAttribute,
|
||||
username
|
||||
_userObjectClass,
|
||||
_userIdAttribute,
|
||||
username
|
||||
};
|
||||
NamingEnumeration<SearchResult> results = _rootContext.search(_userBaseDn, filter, filterArguments, ctls);
|
||||
|
||||
|
@ -556,7 +557,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
throw new LoginException("User not found.");
|
||||
}
|
||||
|
||||
return (SearchResult) results.nextElement();
|
||||
return (SearchResult)results.nextElement();
|
||||
}
|
||||
|
||||
|
||||
|
@ -565,37 +566,37 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
* <p>
|
||||
* Called once by JAAS after new instance is created.
|
||||
*
|
||||
* @param subject the subject
|
||||
* @param subject the subject
|
||||
* @param callbackHandler the callback handler
|
||||
* @param sharedState the shared state map
|
||||
* @param options the option map
|
||||
* @param sharedState the shared state map
|
||||
* @param options the option map
|
||||
*/
|
||||
public void initialize(Subject subject,
|
||||
CallbackHandler callbackHandler,
|
||||
Map<String,?> sharedState,
|
||||
Map<String,?> options)
|
||||
Map<String, ?> sharedState,
|
||||
Map<String, ?> options)
|
||||
{
|
||||
super.initialize(subject, callbackHandler, sharedState, options);
|
||||
|
||||
_hostname = (String) options.get("hostname");
|
||||
_port = Integer.parseInt((String) options.get("port"));
|
||||
_contextFactory = (String) options.get("contextFactory");
|
||||
_bindDn = (String) options.get("bindDn");
|
||||
_bindPassword = (String) options.get("bindPassword");
|
||||
_authenticationMethod = (String) options.get("authenticationMethod");
|
||||
_hostname = (String)options.get("hostname");
|
||||
_port = Integer.parseInt((String)options.get("port"));
|
||||
_contextFactory = (String)options.get("contextFactory");
|
||||
_bindDn = (String)options.get("bindDn");
|
||||
_bindPassword = (String)options.get("bindPassword");
|
||||
_authenticationMethod = (String)options.get("authenticationMethod");
|
||||
|
||||
_userBaseDn = (String) options.get("userBaseDn");
|
||||
_userBaseDn = (String)options.get("userBaseDn");
|
||||
|
||||
_roleBaseDn = (String) options.get("roleBaseDn");
|
||||
_roleBaseDn = (String)options.get("roleBaseDn");
|
||||
|
||||
if (options.containsKey("forceBindingLogin"))
|
||||
{
|
||||
_forceBindingLogin = Boolean.parseBoolean((String) options.get("forceBindingLogin"));
|
||||
_forceBindingLogin = Boolean.parseBoolean((String)options.get("forceBindingLogin"));
|
||||
}
|
||||
|
||||
if (options.containsKey("useLdaps"))
|
||||
{
|
||||
_useLdaps = Boolean.parseBoolean((String) options.get("useLdaps"));
|
||||
_useLdaps = Boolean.parseBoolean((String)options.get("useLdaps"));
|
||||
}
|
||||
|
||||
_userObjectClass = getOption(options, "userObjectClass", _userObjectClass);
|
||||
|
@ -625,7 +626,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
}
|
||||
catch (NamingException e)
|
||||
{
|
||||
throw new LoginException( "error closing root context: " + e.getMessage() );
|
||||
throw new LoginException("error closing root context: " + e.getMessage());
|
||||
}
|
||||
|
||||
return super.commit();
|
||||
|
@ -639,13 +640,13 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
}
|
||||
catch (NamingException e)
|
||||
{
|
||||
throw new LoginException( "error closing root context: " + e.getMessage() );
|
||||
throw new LoginException("error closing root context: " + e.getMessage());
|
||||
}
|
||||
|
||||
return super.abort();
|
||||
}
|
||||
|
||||
private String getOption(Map<String,?> options, String key, String defaultValue)
|
||||
private String getOption(Map<String, ?> options, String key, String defaultValue)
|
||||
{
|
||||
Object value = options.get(key);
|
||||
|
||||
|
@ -654,7 +655,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
return defaultValue;
|
||||
}
|
||||
|
||||
return (String) value;
|
||||
return (String)value;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -670,7 +671,7 @@ public class LdapLoginModule extends AbstractLoginModule
|
|||
|
||||
if (_hostname != null)
|
||||
{
|
||||
env.put(Context.PROVIDER_URL, (_useLdaps?"ldaps://":"ldap://") + _hostname + (_port==0?"":":"+_port) +"/");
|
||||
env.put(Context.PROVIDER_URL, (_useLdaps ? "ldaps://" : "ldap://") + _hostname + (_port == 0 ? "" : ":" + _port) + "/");
|
||||
}
|
||||
|
||||
if (_authenticationMethod != null)
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.eclipse.jetty.jmx;
|
|||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -46,14 +47,11 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
{
|
||||
private final static Logger LOG = Log.getLogger(MBeanContainer.class.getName());
|
||||
private final static ConcurrentMap<String, AtomicInteger> __unique = new ConcurrentHashMap<>();
|
||||
|
||||
public static void resetUnique()
|
||||
{
|
||||
__unique.clear();
|
||||
}
|
||||
private static final Container ROOT = new ContainerLifeCycle();
|
||||
|
||||
private final MBeanServer _mbeanServer;
|
||||
private final Map<Object, ObjectName> _beans = new ConcurrentHashMap<>();
|
||||
private final ConcurrentMap<Object, Container> _beans = new ConcurrentHashMap<>();
|
||||
private final ConcurrentMap<Object, ObjectName> _mbeans = new ConcurrentHashMap<>();
|
||||
private String _domain = null;
|
||||
|
||||
/**
|
||||
|
@ -64,7 +62,7 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
*/
|
||||
public ObjectName findMBean(Object object)
|
||||
{
|
||||
return _beans.get(object);
|
||||
return _mbeans.get(object);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -75,7 +73,7 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
*/
|
||||
public Object findBean(ObjectName objectName)
|
||||
{
|
||||
for (Map.Entry<Object, ObjectName> entry : _beans.entrySet())
|
||||
for (Map.Entry<Object, ObjectName> entry : _mbeans.entrySet())
|
||||
{
|
||||
if (entry.getValue().equals(objectName))
|
||||
return entry.getKey();
|
||||
|
@ -130,9 +128,19 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("beanAdded {}->{}", parent, obj);
|
||||
|
||||
if (obj == null)
|
||||
return;
|
||||
|
||||
if (parent == null)
|
||||
parent = ROOT;
|
||||
|
||||
// Is the bean already tracked ?
|
||||
if (_beans.putIfAbsent(obj, parent) != null)
|
||||
return;
|
||||
|
||||
// Is there an object name for the parent ?
|
||||
ObjectName parentObjectName = null;
|
||||
if (parent != null)
|
||||
if (parent != ROOT)
|
||||
{
|
||||
parentObjectName = findMBean(parent);
|
||||
if (parentObjectName == null)
|
||||
|
@ -143,10 +151,6 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
}
|
||||
}
|
||||
|
||||
// Does the mbean already exist ?
|
||||
if (obj == null || _beans.containsKey(obj))
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
// Create an MBean for the object.
|
||||
|
@ -207,7 +211,7 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Registered {}", objectName);
|
||||
|
||||
_beans.put(obj, objectName);
|
||||
_mbeans.put(obj, objectName);
|
||||
}
|
||||
catch (Throwable x)
|
||||
{
|
||||
|
@ -219,12 +223,17 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
public void beanRemoved(Container parent, Object obj)
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("beanRemoved {}", obj);
|
||||
LOG.debug("beanRemoved {}->{}", parent, obj);
|
||||
|
||||
ObjectName objectName = _beans.remove(obj);
|
||||
if (parent == null)
|
||||
parent = ROOT;
|
||||
|
||||
if (objectName != null)
|
||||
unregister(objectName);
|
||||
if (_beans.remove(obj, parent))
|
||||
{
|
||||
ObjectName objectName = _mbeans.remove(obj);
|
||||
if (objectName != null)
|
||||
unregister(objectName);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -248,7 +257,7 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
public void dump(Appendable out, String indent) throws IOException
|
||||
{
|
||||
ContainerLifeCycle.dumpObject(out,this);
|
||||
ContainerLifeCycle.dump(out, indent, _beans.entrySet());
|
||||
ContainerLifeCycle.dump(out, indent, _mbeans.entrySet());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -260,9 +269,11 @@ public class MBeanContainer implements Container.InheritedListener, Dumpable, De
|
|||
@Override
|
||||
public void destroy()
|
||||
{
|
||||
_beans.values().stream()
|
||||
.filter(objectName -> objectName != null)
|
||||
_mbeans.values().stream()
|
||||
.filter(Objects::nonNull)
|
||||
.forEach(this::unregister);
|
||||
_mbeans.clear();
|
||||
_beans.clear();
|
||||
}
|
||||
|
||||
private void unregister(ObjectName objectName)
|
||||
|
|
|
@ -200,8 +200,8 @@ public class MBeanContainerTest
|
|||
setUpDestroy();
|
||||
|
||||
// when
|
||||
mbeanContainer.destroy();
|
||||
objectName = mbeanContainer.findMBean(managed);
|
||||
mbeanContainer.destroy();
|
||||
|
||||
// then
|
||||
Assert.assertFalse("Unregistered bean - managed", mbeanContainer.getMBeanServer().isRegistered(objectName));
|
||||
|
@ -212,9 +212,9 @@ public class MBeanContainerTest
|
|||
{
|
||||
// given
|
||||
setUpDestroy();
|
||||
objectName = mbeanContainer.findMBean(managed);
|
||||
|
||||
// when
|
||||
objectName = mbeanContainer.findMBean(managed);
|
||||
mbeanContainer.getMBeanServer().unregisterMBean(objectName);
|
||||
|
||||
// then
|
||||
|
@ -224,4 +224,34 @@ public class MBeanContainerTest
|
|||
// an exception of type InstanceNotFoundException occurs.
|
||||
mbeanContainer.destroy();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNonManagedLifecycleNotUnregistered() throws Exception
|
||||
{
|
||||
testNonManagedObjectNotUnregistered(new ContainerLifeCycle());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNonManagedPojoNotUnregistered() throws Exception
|
||||
{
|
||||
testNonManagedObjectNotUnregistered(new Object());
|
||||
}
|
||||
|
||||
private void testNonManagedObjectNotUnregistered(Object lifeCycle) throws Exception
|
||||
{
|
||||
ContainerLifeCycle parent = new ContainerLifeCycle();
|
||||
parent.addBean(mbeanContainer);
|
||||
|
||||
ContainerLifeCycle child = new ContainerLifeCycle();
|
||||
parent.addBean(child);
|
||||
|
||||
parent.addBean(lifeCycle, true);
|
||||
child.addBean(lifeCycle, false);
|
||||
|
||||
parent.start();
|
||||
|
||||
parent.removeBean(child);
|
||||
|
||||
Assert.assertNotNull(mbeanContainer.findMBean(lifeCycle));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,8 +7,8 @@
|
|||
<Array type="java.net.InetSocketAddress">
|
||||
<Item>
|
||||
<New class="java.net.InetSocketAddress">
|
||||
<Arg><Property name="jetty.session.memcache.host"><Default><Env name="MEMCACHE_PORT_11211_TCP_ADDR" default="localhost"/></Default></Property></Arg>
|
||||
<Arg type="int"><Property name="jetty.session.memcache.port"><Default><Env name="MEMCACHE_PORT_11211_TCP_PORT" default="11211"/></Default></Property></Arg>
|
||||
<Arg><Property name="jetty.session.memcached.host"><Default><Env name="MEMCACHE_PORT_11211_TCP_ADDR" default="localhost"/></Default></Property></Arg>
|
||||
<Arg type="int"><Property name="jetty.session.memcached.port"><Default><Env name="MEMCACHE_PORT_11211_TCP_PORT" default="11211"/></Default></Property></Arg>
|
||||
</New>
|
||||
</Item>
|
||||
<!-- Add more here -->
|
||||
|
@ -20,7 +20,7 @@
|
|||
<Item type="int">100</Item>
|
||||
</Array>
|
||||
</Set>
|
||||
<Set name="expirySec"><Property name="jetty.session.memcache.expirySec" default="0"/></Set>
|
||||
<Set name="heartbeats"><Property name="jetty.session.memcache.heartbeats" default="true"/></Set>
|
||||
<Set name="expirySec"><Property name="jetty.session.memcached.expirySec" default="0"/></Set>
|
||||
<Set name="heartbeats"><Property name="jetty.session.memcached.heartbeats" default="true"/></Set>
|
||||
</New>
|
||||
</Configure>
|
||||
|
|
|
@ -30,11 +30,17 @@ import java.io.BufferedWriter;
|
|||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.FilePermission;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.Writer;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.attribute.PosixFilePermission;
|
||||
import java.nio.file.attribute.PosixFilePermissions;
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -139,7 +145,7 @@ public class PropertyUserStoreTest
|
|||
}
|
||||
|
||||
}
|
||||
return "jar:file:" + usersJar.getCanonicalPath() + "!/" + entryPath;
|
||||
return "jar:" + usersJar.toURI().toASCIIString() + "!/" + entryPath;
|
||||
}
|
||||
|
||||
private void writeUser(File usersFile)
|
||||
|
@ -205,30 +211,47 @@ public class PropertyUserStoreTest
|
|||
userCount.awaitCount(3);
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testPropertyUserStoreLoadUpdateUser() throws Exception
|
||||
{
|
||||
assumeThat("Skipping on OSX", OS.IS_OSX, is(false));
|
||||
final UserCount userCount = new UserCount();
|
||||
final File usersFile = initUsersText();
|
||||
|
||||
PropertyUserStore store = new PropertyUserStore();
|
||||
final AtomicInteger loadCount = new AtomicInteger(0);
|
||||
PropertyUserStore store = new PropertyUserStore()
|
||||
{
|
||||
@Override
|
||||
protected void loadUsers() throws IOException
|
||||
{
|
||||
loadCount.incrementAndGet();
|
||||
super.loadUsers();
|
||||
}
|
||||
};
|
||||
store.setHotReload(true);
|
||||
store.setConfigFile(usersFile);
|
||||
|
||||
store.registerUserListener(userCount);
|
||||
|
||||
store.start();
|
||||
|
||||
userCount.assertThatCount(is(3));
|
||||
|
||||
addAdditionalUser(usersFile,"skip: skip, roleA\n");
|
||||
|
||||
userCount.awaitCount(4);
|
||||
|
||||
assertThat("Failed to retrieve UserIdentity from PropertyUserStore directly", store.getUserIdentity("skip"), notNullValue());
|
||||
assertThat(loadCount.get(),is(1));
|
||||
|
||||
addAdditionalUser(usersFile,"skip: skip, roleA\n");
|
||||
userCount.awaitCount(4);
|
||||
assertThat(loadCount.get(),is(2));
|
||||
assertThat(store.getUserIdentity("skip"), notNullValue());
|
||||
userCount.assertThatCount(is(4));
|
||||
userCount.assertThatUsers(hasItem("skip"));
|
||||
|
||||
if (OS.IS_LINUX)
|
||||
Files.createFile(testdir.getPath().toRealPath().resolve("unrelated.txt"),
|
||||
PosixFilePermissions.asFileAttribute(EnumSet.noneOf(PosixFilePermission.class)));
|
||||
else
|
||||
Files.createFile(testdir.getPath().toRealPath().resolve("unrelated.txt"));
|
||||
|
||||
Thread.sleep(1100);
|
||||
assertThat(loadCount.get(),is(2));
|
||||
|
||||
userCount.assertThatCount(is(4));
|
||||
userCount.assertThatUsers(hasItem("skip"));
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,13 @@
<?xml version="1.0"?>
<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_3.dtd">

<Configure id="Server" class="org.eclipse.jetty.server.Server">
  <Call name="addBean">
    <Arg>
      <New class="org.eclipse.jetty.server.ConnectionLimit">
        <Arg type="int"><Property name="jetty.connection.limit" default="1000"/></Arg>
        <Arg><Ref refid="Server"/></Arg>
      </New>
    </Arg>
  </Call>
</Configure>

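The XML above only registers a ConnectionLimit bean on the Server. The following is a minimal embedded-Jetty sketch of the equivalent programmatic setup; the connector and port are illustrative assumptions, not part of this patch.

// Illustrative sketch only: programmatic equivalent of etc/jetty-connectionlimit.xml.
import org.eclipse.jetty.server.ConnectionLimit;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public class ConnectionLimitExample
{
    public static void main(String[] args) throws Exception
    {
        Server server = new Server();
        ServerConnector connector = new ServerConnector(server);
        connector.setPort(8080); // example port, not from the patch
        server.addConnector(connector);

        // Same effect as the <Call name="addBean"> in the XML above:
        // limit the whole server to 1000 concurrent connections.
        server.addBean(new ConnectionLimit(1000, server));

        server.start();
        server.join();
    }
}
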
@@ -16,6 +16,7 @@
      <Set name="maxConnections"><Property name="jetty.lowresources.maxConnections" deprecated="lowresources.maxConnections" default="0"/></Set>
      <Set name="maxMemory"><Property name="jetty.lowresources.maxMemory" deprecated="lowresources.maxMemory" default="0"/></Set>
      <Set name="maxLowResourcesTime"><Property name="jetty.lowresources.maxLowResourcesTime" deprecated="lowresources.maxLowResourcesTime" default="5000"/></Set>
      <Set name="acceptingInLowResources"><Property name="jetty.lowresources.accepting" default="true"/></Set>
      </New>
    </Arg>
  </Call>

@@ -0,0 +1,14 @@
[description]
Enable a server wide connection limit

[tags]
connector

[depend]
server

[xml]
etc/jetty-connectionlimit.xml

[ini-template]
jetty.connection.limit=1000

@@ -27,3 +27,6 @@ etc/jetty-lowresources.xml

## Max time a resource may stay in low resource mode before actions are taken (in milliseconds)
# jetty.lowresources.maxLowResourcesTime=5000

## Accept new connections while in low resources
# jetty.lowresources.accepting=true

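The new jetty.lowresources.accepting property corresponds to the setAcceptingInLowResources(boolean) setter added to LowResourceMonitor in this change. A minimal programmatic sketch, assuming the pre-existing LowResourceMonitor(Server) constructor and setMaxMemory(long) setter; the memory threshold is an example value, not from the patch:

// Illustrative sketch only: stop accepting new connections while in low-resource mode.
import org.eclipse.jetty.server.LowResourceMonitor;
import org.eclipse.jetty.server.Server;

public class LowResourcesAcceptingExample
{
    public static void main(String[] args) throws Exception
    {
        Server server = new Server();
        LowResourceMonitor lowResources = new LowResourceMonitor(server);
        lowResources.setAcceptingInLowResources(false); // new setter added in this change
        lowResources.setMaxMemory(128 * 1024 * 1024);   // example threshold, not from the patch
        server.addBean(lowResources);
        server.start();
        server.join();
    }
}
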
@@ -82,10 +82,3 @@ etc/jetty.xml

## Dump the state of the Jetty server, components, and webapps before shutdown
# jetty.server.dumpBeforeStop=false

## The name to uniquely identify this server instance
#jetty.defaultSessionIdManager.workerName=node1

## How frequently sessions are inspected
#jetty.sessionInspectionInterval.seconds=60

@@ -24,4 +24,4 @@ session-data-cache=xmemcached
#jetty.session.memcached.host=localhost
#jetty.session.memcached.port=11211
#jetty.session.memcached.expirySec=

#jetty.session.memcached.heartbeats=true

@ -0,0 +1,150 @@
|
|||
//
|
||||
// ========================================================================
|
||||
// Copyright (c) 1995-2017 Mort Bay Consulting Pty. Ltd.
|
||||
// ------------------------------------------------------------------------
|
||||
// All rights reserved. This program and the accompanying materials
|
||||
// are made available under the terms of the Eclipse Public License v1.0
|
||||
// and Apache License v2.0 which accompanies this distribution.
|
||||
//
|
||||
// The Eclipse Public License is available at
|
||||
// http://www.eclipse.org/legal/epl-v10.html
|
||||
//
|
||||
// The Apache License v2.0 is available at
|
||||
// http://www.opensource.org/licenses/apache2.0.php
|
||||
//
|
||||
// You may elect to redistribute this code under either of these licenses.
|
||||
// ========================================================================
|
||||
//
|
||||
|
||||
package org.eclipse.jetty.server;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.eclipse.jetty.io.Connection;
|
||||
import org.eclipse.jetty.io.Connection.Listener;
|
||||
import org.eclipse.jetty.util.annotation.ManagedAttribute;
|
||||
import org.eclipse.jetty.util.annotation.ManagedObject;
|
||||
import org.eclipse.jetty.util.component.AbstractLifeCycle;
|
||||
import org.eclipse.jetty.util.log.Log;
|
||||
import org.eclipse.jetty.util.log.Logger;
|
||||
|
||||
/**
|
||||
* A Connection Listener that limits the number of Connections.
|
||||
* <p>This listener applies a limit to the number of connections, which when
|
||||
* exceeded results in a call to {@link AbstractConnector#setAccepting(boolean)}
|
||||
* to prevent further connections being received. It can be applied to an
|
||||
* entire server or to a specific connector.
|
||||
* <p>
|
||||
* @see Connection.Listener
|
||||
*/
|
||||
@ManagedObject
|
||||
public class ConnectionLimit extends AbstractLifeCycle implements Listener
|
||||
{
|
||||
private static final Logger LOG = Log.getLogger(ConnectionLimit.class);
|
||||
|
||||
private final Server _server;
|
||||
private final List<AbstractConnector> _connectors = new ArrayList<>();
|
||||
private int _connections;
|
||||
private int _maxConnections;
|
||||
private boolean _accepting = true;
|
||||
|
||||
public ConnectionLimit(int maxConnections, Server server)
|
||||
{
|
||||
_maxConnections = maxConnections;
|
||||
_server = server;
|
||||
}
|
||||
|
||||
public ConnectionLimit(int maxConnections, Connector...connectors)
|
||||
{
|
||||
_maxConnections = maxConnections;
|
||||
_server = null;
|
||||
for (Connector c: connectors)
|
||||
{
|
||||
if (c instanceof AbstractConnector)
|
||||
_connectors.add((AbstractConnector)c);
|
||||
else
|
||||
LOG.warn("Connector {} is not an AbstractConnection. Connections not limited",c);
|
||||
}
|
||||
}
|
||||
|
||||
@ManagedAttribute("The maximum number of connections allowed")
|
||||
public synchronized int getMaxConnections()
|
||||
{
|
||||
return _maxConnections;
|
||||
}
|
||||
|
||||
public synchronized void setMaxConnections(int max)
|
||||
{
|
||||
_maxConnections = max;
|
||||
}
|
||||
|
||||
@ManagedAttribute("The current number of connections ")
|
||||
public synchronized int getConnections()
|
||||
{
|
||||
return _connections;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void doStart() throws Exception
|
||||
{
|
||||
if (_server!=null)
|
||||
{
|
||||
for (Connector c: _server.getConnectors())
|
||||
{
|
||||
if (c instanceof AbstractConnector)
|
||||
_connectors.add((AbstractConnector)c);
|
||||
else
|
||||
LOG.warn("Connector {} is not an AbstractConnection. Connections not limited",c);
|
||||
}
|
||||
}
|
||||
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("ConnectionLimit {} for {}",_maxConnections,_connectors);
|
||||
|
||||
_connections = 0;
|
||||
_accepting = true;
|
||||
|
||||
for (AbstractConnector c : _connectors)
|
||||
c.addBean(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void doStop() throws Exception
|
||||
{
|
||||
for (AbstractConnector c : _connectors)
|
||||
c.removeBean(this);
|
||||
_connections = 0;
|
||||
if (_server!=null)
|
||||
_connectors.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void onOpened(Connection connection)
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("onOpen {} < {} {}",_connections, _maxConnections, connection);
|
||||
if ( ++_connections >= _maxConnections && _accepting)
|
||||
{
|
||||
_accepting = false;
|
||||
LOG.info("Connection Limit({}) reached for {}",_maxConnections,_connectors);
|
||||
for (AbstractConnector c : _connectors)
|
||||
c.setAccepting(false);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void onClosed(Connection connection)
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("onClosed {} < {} {}",_connections, _maxConnections, connection);
|
||||
if ( --_connections < _maxConnections && !_accepting)
|
||||
{
|
||||
_accepting = true;
|
||||
LOG.info("Connection Limit({}) cleared for {}",_maxConnections,_connectors);
|
||||
for (AbstractConnector c : _connectors)
|
||||
c.setAccepting(true);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
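Besides the server-wide form used by the connectionlimit module, the ConnectionLimit(int, Connector...) constructor above allows limiting only selected connectors. A minimal sketch; the connector wiring and the choice of registering the limit as a server bean are illustrative assumptions, not part of this patch:

// Illustrative sketch only: apply the limit to one connector rather than the whole server.
import org.eclipse.jetty.server.ConnectionLimit;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public class PerConnectorLimitExample
{
    public static void main(String[] args) throws Exception
    {
        Server server = new Server();
        ServerConnector external = new ServerConnector(server);
        external.setPort(8443); // example port, not from the patch
        server.addConnector(external);

        // Only the "external" connector stops accepting once 500 connections are open.
        server.addBean(new ConnectionLimit(500, external));

        server.start();
        server.join();
    }
}
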
@ -21,6 +21,8 @@ package org.eclipse.jetty.server;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
@ -46,7 +48,7 @@ import org.eclipse.jetty.util.thread.ThreadPool;
|
|||
* Low resources can be detected by:
|
||||
* <ul>
|
||||
* <li>{@link ThreadPool#isLowOnThreads()} if {@link Connector#getExecutor()} is
|
||||
* an instance of {@link ThreadPool} and {@link #setMonitorThreads(boolean)} is true.<li>
|
||||
* an instance of {@link ThreadPool} and {@link #setMonitorThreads(boolean)} is true.</li>
|
||||
* <li>If {@link #setMaxMemory(long)} is non zero then low resources is detected if the JVMs
|
||||
* {@link Runtime} instance has {@link Runtime#totalMemory()} minus {@link Runtime#freeMemory()}
|
||||
* greater than {@link #getMaxMemory()}</li>
|
||||
|
@ -60,6 +62,9 @@ import org.eclipse.jetty.util.thread.ThreadPool;
|
|||
* resources state persists for more than {@link #getMaxLowResourcesTime()}, then the
|
||||
* {@link #getLowResourcesIdleTimeout()} to all connections again. Once the low resources state is
|
||||
* cleared, the idle timeout is reset to the connector default given by {@link Connector#getIdleTimeout()}.
|
||||
* <p>
|
||||
* If {@link #setAcceptingInLowResources(boolean)} is set to false, then no new connections are accepted
|
||||
* when in low resources state.
|
||||
*/
|
||||
@ManagedObject ("Monitor for low resource conditions and activate a low resource mode if detected")
|
||||
public class LowResourceMonitor extends AbstractLifeCycle
|
||||
|
@ -68,6 +73,7 @@ public class LowResourceMonitor extends AbstractLifeCycle
|
|||
private final Server _server;
|
||||
private Scheduler _scheduler;
|
||||
private Connector[] _monitoredConnectors;
|
||||
private Set<AbstractConnector> _acceptingConnectors = new HashSet<>();
|
||||
private int _period=1000;
|
||||
private int _maxConnections;
|
||||
private long _maxMemory;
|
||||
|
@ -78,6 +84,7 @@ public class LowResourceMonitor extends AbstractLifeCycle
|
|||
private String _cause;
|
||||
private String _reasons;
|
||||
private long _lowStarted;
|
||||
private boolean _acceptingInLowResources = true;
|
||||
|
||||
private final Runnable _monitor = new Runnable()
|
||||
{
|
||||
|
@ -134,6 +141,17 @@ public class LowResourceMonitor extends AbstractLifeCycle
|
|||
_monitoredConnectors = monitoredConnectors.toArray(new Connector[monitoredConnectors.size()]);
|
||||
}
|
||||
|
||||
@ManagedAttribute("If false, new connections are not accepted while in low resources")
|
||||
public boolean isAcceptingInLowResources()
|
||||
{
|
||||
return _acceptingInLowResources;
|
||||
}
|
||||
|
||||
public void setAcceptingInLowResources(boolean acceptingInLowResources)
|
||||
{
|
||||
_acceptingInLowResources = acceptingInLowResources;
|
||||
}
|
||||
|
||||
@ManagedAttribute("The monitor period in ms")
|
||||
public int getPeriod()
|
||||
{
|
||||
|
@ -329,6 +347,15 @@ public class LowResourceMonitor extends AbstractLifeCycle
|
|||
{
|
||||
for(Connector connector : getMonitoredOrServerConnectors())
|
||||
{
|
||||
if (connector instanceof AbstractConnector)
|
||||
{
|
||||
AbstractConnector c = (AbstractConnector)connector;
|
||||
if (c.isAccepting())
|
||||
{
|
||||
_acceptingConnectors.add(c);
|
||||
c.setAccepting(false);
|
||||
}
|
||||
}
|
||||
for (EndPoint endPoint : connector.getConnectedEndPoints())
|
||||
endPoint.setIdleTimeout(_lowResourcesIdleTimeout);
|
||||
}
|
||||
|
@ -341,6 +368,12 @@ public class LowResourceMonitor extends AbstractLifeCycle
|
|||
for (EndPoint endPoint : connector.getConnectedEndPoints())
|
||||
endPoint.setIdleTimeout(connector.getIdleTimeout());
|
||||
}
|
||||
|
||||
for (AbstractConnector connector : _acceptingConnectors)
|
||||
{
|
||||
connector.setAccepting(true);
|
||||
}
|
||||
_acceptingConnectors.clear();
|
||||
}
|
||||
|
||||
private String low(String reasons, String newReason)
|
||||
|
|
|
@ -402,11 +402,7 @@ public class Server extends HandlerWrapper implements Attributes
|
|||
acceptors += abstractConnector.getAcceptors();
|
||||
|
||||
if (connector instanceof ServerConnector)
|
||||
{
|
||||
// The SelectorManager uses 2 threads for each selector,
|
||||
// one for the normal and one for the low priority strategies.
|
||||
selectors += 2 * ((ServerConnector)connector).getSelectorManager().getSelectorCount();
|
||||
}
|
||||
selectors += ((ServerConnector)connector).getSelectorManager().getSelectorCount();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ import java.io.File;
|
|||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.EnumSet;
|
||||
import java.util.ListIterator;
|
||||
import java.util.Set;
|
||||
import java.util.zip.Deflater;
|
||||
|
||||
|
@ -495,22 +496,27 @@ public class GzipHandler extends HandlerWrapper implements GzipFactory
|
|||
}
|
||||
|
||||
// Special handling for etags
|
||||
String etag = baseRequest.getHttpFields().get(HttpHeader.IF_NONE_MATCH);
|
||||
if (etag!=null)
|
||||
for (ListIterator<HttpField> fields = baseRequest.getHttpFields().listIterator(); fields.hasNext();)
|
||||
{
|
||||
int i=etag.indexOf(CompressedContentFormat.GZIP._etagQuote);
|
||||
if (i>0)
|
||||
HttpField field = fields.next();
|
||||
if (field.getHeader()==HttpHeader.IF_NONE_MATCH || field.getHeader()==HttpHeader.IF_MATCH)
|
||||
{
|
||||
baseRequest.setAttribute("o.e.j.s.h.gzip.GzipHandler.etag",etag);
|
||||
while (i>=0)
|
||||
String etag = field.getValue();
|
||||
int i=etag.indexOf(CompressedContentFormat.GZIP._etagQuote);
|
||||
if (i>0)
|
||||
{
|
||||
etag=etag.substring(0,i)+etag.substring(i+CompressedContentFormat.GZIP._etag.length());
|
||||
i=etag.indexOf(CompressedContentFormat.GZIP._etagQuote,i);
|
||||
}
|
||||
baseRequest.getHttpFields().put(new HttpField(HttpHeader.IF_NONE_MATCH,etag));
|
||||
baseRequest.setAttribute("o.e.j.s.h.gzip.GzipHandler.etag",etag);
|
||||
while (i>=0)
|
||||
{
|
||||
etag=etag.substring(0,i)+etag.substring(i+CompressedContentFormat.GZIP._etag.length());
|
||||
i=etag.indexOf(CompressedContentFormat.GZIP._etagQuote,i);
|
||||
}
|
||||
|
||||
fields.set(new HttpField(field.getHeader(),etag));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
HttpOutput.Interceptor orig_interceptor = out.getInterceptor();
|
||||
try
|
||||
{
|
||||
|
|
|
@ -628,7 +628,7 @@ public abstract class AbstractSessionCache extends ContainerLifeCycle implements
|
|||
{
|
||||
|
||||
boolean dsdel = _sessionDataStore.delete(id);
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Session {} deleted in db {}",id, dsdel);
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Session {} deleted in session data store {}",id, dsdel);
|
||||
}
|
||||
|
||||
//delete it from the session object store
|
||||
|
@ -656,7 +656,7 @@ public abstract class AbstractSessionCache extends ContainerLifeCycle implements
|
|||
return Collections.emptySet();
|
||||
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("SessionDataStore checking expiration on {}", candidates);
|
||||
LOG.debug("{} checking expiration on {}", this, candidates);
|
||||
Set<String> allCandidates = _sessionDataStore.getExpired(candidates);
|
||||
Set<String> sessionsInUse = new HashSet<>();
|
||||
if (allCandidates != null)
|
||||
|
|
|
@ -261,7 +261,7 @@ public class HouseKeeper extends AbstractLifeCycle
|
|||
return;
|
||||
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Scavenging sessions");
|
||||
LOG.debug("{} scavenging sessions", this);
|
||||
|
||||
//find the session managers
|
||||
for (SessionHandler manager:_sessionIdManager.getSessionHandlers())
|
||||
|
|
|
@ -1265,7 +1265,7 @@ public class SessionHandler extends ScopedHandler
|
|||
Set<String> candidates = new HashSet<String>(Arrays.asList(ss));
|
||||
_candidateSessionIdsForExpiry.removeAll(candidates);
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("Scavenging session ids {}", candidates);
|
||||
LOG.debug("{} scavenging session ids {}", this, candidates);
|
||||
try
|
||||
{
|
||||
candidates = _sessionCache.checkExpiration(candidates);
|
||||
|
|
|
@ -142,7 +142,7 @@ public class CookieCutterTest
|
|||
* Example from RFC2965
|
||||
*/
|
||||
@Test
|
||||
@Ignore("comma separation no longer supported by RFC6265")
|
||||
@Ignore("comma separation no longer supported by new RFC6265")
|
||||
public void testRFC2965_CookieSpoofingExample()
|
||||
{
|
||||
String rawCookie = "$Version=\"1\"; session_id=\"1234\", " +
|
||||
|
|
|
@ -31,6 +31,7 @@ import java.util.concurrent.CountDownLatch;
|
|||
import org.eclipse.jetty.toolchain.test.AdvancedRunner;
|
||||
import org.eclipse.jetty.util.thread.QueuedThreadPool;
|
||||
import org.eclipse.jetty.util.thread.TimerScheduler;
|
||||
import org.hamcrest.Matchers;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
|
@ -118,6 +119,51 @@ public class LowResourcesMonitorTest
|
|||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testNotAccepting() throws Exception
|
||||
{
|
||||
_lowResourcesMonitor.setAcceptingInLowResources(false);
|
||||
Thread.sleep(1200);
|
||||
_threadPool.setMaxThreads(_threadPool.getThreads()-_threadPool.getIdleThreads()+10);
|
||||
Thread.sleep(1200);
|
||||
Assert.assertFalse(_lowResourcesMonitor.isLowOnResources());
|
||||
|
||||
for (AbstractConnector c : _server.getBeans(AbstractConnector.class))
|
||||
assertThat(c.isAccepting(),Matchers.is(true));
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
for (int i=0;i<100;i++)
|
||||
{
|
||||
_threadPool.execute(new Runnable()
|
||||
{
|
||||
@Override
|
||||
public void run()
|
||||
{
|
||||
try
|
||||
{
|
||||
latch.await();
|
||||
}
|
||||
catch (InterruptedException e)
|
||||
{
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
for (AbstractConnector c : _server.getBeans(AbstractConnector.class))
|
||||
assertThat(c.isAccepting(),Matchers.is(false));
|
||||
|
||||
latch.countDown();
|
||||
Thread.sleep(1200);
|
||||
Assert.assertFalse(_lowResourcesMonitor.isLowOnResources());
|
||||
for (AbstractConnector c : _server.getBeans(AbstractConnector.class))
|
||||
assertThat(c.isAccepting(),Matchers.is(true));
|
||||
}
|
||||
|
||||
|
||||
@Ignore ("not reliable")
|
||||
@Test
|
||||
public void testLowOnMemory() throws Exception
|
||||
|
@ -155,18 +201,18 @@ public class LowResourcesMonitorTest
|
|||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
|
||||
Socket newSocket = new Socket("localhost",_connector.getLocalPort());
|
||||
|
||||
// wait for low idle time to close sockets, but not new Socket
|
||||
Thread.sleep(1200);
|
||||
Assert.assertFalse(_lowResourcesMonitor.isLowOnResources());
|
||||
try(Socket newSocket = new Socket("localhost",_connector.getLocalPort()))
|
||||
{
|
||||
// wait for low idle time to close sockets, but not new Socket
|
||||
Thread.sleep(1200);
|
||||
Assert.assertFalse(_lowResourcesMonitor.isLowOnResources());
|
||||
|
||||
for (int i=0;i<socket.length;i++)
|
||||
Assert.assertEquals(-1,socket[i].getInputStream().read());
|
||||
|
||||
newSocket.getOutputStream().write("GET / HTTP/1.0\r\n\r\n".getBytes(StandardCharsets.UTF_8));
|
||||
Assert.assertEquals('H',newSocket.getInputStream().read());
|
||||
|
||||
for (int i=0;i<socket.length;i++)
|
||||
Assert.assertEquals(-1,socket[i].getInputStream().read());
|
||||
|
||||
newSocket.getOutputStream().write("GET / HTTP/1.0\r\n\r\n".getBytes(StandardCharsets.UTF_8));
|
||||
Assert.assertEquals('H',newSocket.getInputStream().read());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -175,26 +221,28 @@ public class LowResourcesMonitorTest
|
|||
_lowResourcesMonitor.setMaxLowResourcesTime(2000);
|
||||
Assert.assertFalse(_lowResourcesMonitor.isLowOnResources());
|
||||
|
||||
Socket socket0 = new Socket("localhost",_connector.getLocalPort());
|
||||
_lowResourcesMonitor.setMaxMemory(1);
|
||||
try(Socket socket0 = new Socket("localhost",_connector.getLocalPort()))
|
||||
{
|
||||
_lowResourcesMonitor.setMaxMemory(1);
|
||||
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
|
||||
Socket socket1 = new Socket("localhost",_connector.getLocalPort());
|
||||
try(Socket socket1 = new Socket("localhost",_connector.getLocalPort()))
|
||||
{
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
Assert.assertEquals(-1,socket0.getInputStream().read());
|
||||
socket1.getOutputStream().write("G".getBytes(StandardCharsets.UTF_8));
|
||||
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
Assert.assertEquals(-1,socket0.getInputStream().read());
|
||||
socket1.getOutputStream().write("G".getBytes(StandardCharsets.UTF_8));
|
||||
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
socket1.getOutputStream().write("E".getBytes(StandardCharsets.UTF_8));
|
||||
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
Assert.assertEquals(-1,socket1.getInputStream().read());
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
socket1.getOutputStream().write("E".getBytes(StandardCharsets.UTF_8));
|
||||
|
||||
Thread.sleep(1200);
|
||||
Assert.assertTrue(_lowResourcesMonitor.isLowOnResources());
|
||||
Assert.assertEquals(-1,socket1.getInputStream().read());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -278,4 +278,106 @@ public class NotAcceptingTest
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConnectionLimit() throws Exception
|
||||
{
|
||||
Server server = new Server();
|
||||
server.addBean(new ConnectionLimit(9,server));
|
||||
server.setHandler(new HelloHandler());
|
||||
|
||||
LocalConnector localConnector = new LocalConnector(server);
|
||||
localConnector.setIdleTimeout(60000);
|
||||
server.addConnector(localConnector);
|
||||
|
||||
ServerConnector blockingConnector = new ServerConnector(server,1,1);
|
||||
blockingConnector.setPort(0);
|
||||
blockingConnector.setIdleTimeout(60000);
|
||||
blockingConnector.setAcceptQueueSize(10);
|
||||
server.addConnector(blockingConnector);
|
||||
|
||||
ServerConnector asyncConnector = new ServerConnector(server,0,1);
|
||||
asyncConnector.setPort(0);
|
||||
asyncConnector.setIdleTimeout(60000);
|
||||
asyncConnector.setAcceptQueueSize(10);
|
||||
server.addConnector(asyncConnector);
|
||||
|
||||
server.start();
|
||||
|
||||
try (
|
||||
LocalEndPoint local0 = localConnector.connect();
|
||||
LocalEndPoint local1 = localConnector.connect();
|
||||
LocalEndPoint local2 = localConnector.connect();
|
||||
Socket blocking0 = new Socket("localhost",blockingConnector.getLocalPort());
|
||||
Socket blocking1 = new Socket("localhost",blockingConnector.getLocalPort());
|
||||
Socket blocking2 = new Socket("localhost",blockingConnector.getLocalPort());
|
||||
Socket async0 = new Socket("localhost",asyncConnector.getLocalPort());
|
||||
Socket async1 = new Socket("localhost",asyncConnector.getLocalPort());
|
||||
Socket async2 = new Socket("localhost",asyncConnector.getLocalPort());
|
||||
)
|
||||
{
|
||||
for (LocalEndPoint client: new LocalEndPoint[] {local0,local1,local2})
|
||||
{
|
||||
client.addInputAndExecute(BufferUtil.toBuffer("GET /test HTTP/1.1\r\nHost:localhost\r\n\r\n"));
|
||||
HttpTester.Response response = HttpTester.parseResponse(client.getResponse());
|
||||
assertThat(response.getStatus(),is(200));
|
||||
assertThat(response.getContent(),is("Hello\n"));
|
||||
}
|
||||
|
||||
for (Socket client : new Socket[]{blocking0,blocking1,blocking2,async0,async1,async2})
|
||||
{
|
||||
HttpTester.Input in = HttpTester.from(client.getInputStream());
|
||||
client.getOutputStream().write("GET /test HTTP/1.1\r\nHost:localhost\r\n\r\n".getBytes());
|
||||
HttpTester.Response response = HttpTester.parseResponse(in);
|
||||
assertThat(response.getStatus(),is(200));
|
||||
assertThat(response.getContent(),is("Hello\n"));
|
||||
}
|
||||
|
||||
assertThat(localConnector.isAccepting(),is(false));
|
||||
assertThat(blockingConnector.isAccepting(),is(false));
|
||||
assertThat(asyncConnector.isAccepting(),is(false));
|
||||
|
||||
{
|
||||
// Close an async connection
|
||||
HttpTester.Input in = HttpTester.from(async1.getInputStream());
|
||||
async1.getOutputStream().write("GET /test HTTP/1.1\r\nHost:localhost\r\nConnection: close\r\n\r\n".getBytes());
|
||||
HttpTester.Response response = HttpTester.parseResponse(in);
|
||||
assertThat(response.getStatus(),is(200));
|
||||
assertThat(response.getContent(),is("Hello\n"));
|
||||
}
|
||||
|
||||
// make a new connection and request
|
||||
try (Socket blocking3 = new Socket("localhost",blockingConnector.getLocalPort());)
|
||||
{
|
||||
HttpTester.Input in = HttpTester.from(blocking3.getInputStream());
|
||||
blocking3.getOutputStream().write("GET /test HTTP/1.1\r\nHost:localhost\r\n\r\n".getBytes());
|
||||
HttpTester.Response response = HttpTester.parseResponse(in);
|
||||
assertThat(response.getStatus(),is(200));
|
||||
assertThat(response.getContent(),is("Hello\n"));
|
||||
}
|
||||
}
|
||||
|
||||
Thread.sleep(500); // TODO avoid lame sleep ???
|
||||
assertThat(localConnector.isAccepting(),is(true));
|
||||
assertThat(blockingConnector.isAccepting(),is(true));
|
||||
assertThat(asyncConnector.isAccepting(),is(true));
|
||||
|
||||
}
|
||||
|
||||
public static class HelloHandler extends AbstractHandler
|
||||
{
|
||||
public HelloHandler()
|
||||
{
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException
|
||||
{
|
||||
baseRequest.setHandled(true);
|
||||
response.setContentType("text/html;charset=utf-8");
|
||||
response.setStatus(HttpServletResponse.SC_OK);
|
||||
response.getWriter().println("Hello");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -583,7 +583,7 @@ public class DoSFilter implements Filter
            }
            else
            {
                loadId = isRemotePort() ? (request.getRemoteAddr() + request.getRemotePort()) : request.getRemoteAddr();
                loadId = isRemotePort() ? createRemotePortId(request) : request.getRemoteAddr();
                type = USER_IP;
            }
        }

@@ -616,6 +616,10 @@
        return tracker;
    }

    public void addToRateTracker (RateTracker tracker)
    {
        _rateTrackers.put(tracker.getId(), tracker);

@@ -1356,4 +1360,12 @@
            super.onTimeout(event);
        }
    }

    private String createRemotePortId(final ServletRequest request) {
        final String addr = request.getRemoteAddr();
        final int port = request.getRemotePort();
        if (addr.contains(":")) return "[" + addr + "]:" + port;
        return addr + ":" + port;
    }

}

@ -18,8 +18,20 @@
|
|||
|
||||
package org.eclipse.jetty.servlets;
|
||||
|
||||
import javax.servlet.ServletContext;
|
||||
import static org.hamcrest.Matchers.anyOf;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.junit.Assert.assertThat;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
|
||||
import javax.servlet.FilterConfig;
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.ServletException;
|
||||
import javax.servlet.ServletRequest;
|
||||
|
||||
import org.eclipse.jetty.server.Request;
|
||||
import org.eclipse.jetty.server.handler.ContextHandler;
|
||||
import org.eclipse.jetty.servlets.DoSFilter.RateTracker;
|
||||
import org.hamcrest.Matchers;
|
||||
|
@ -29,12 +41,88 @@ import org.junit.Test;
|
|||
|
||||
public class DoSFilterTest extends AbstractDoSFilterTest
|
||||
{
|
||||
private static class RemoteAddressRequest extends Request
|
||||
{
|
||||
public RemoteAddressRequest(String remoteHost, int remotePort)
|
||||
{
|
||||
super(null, null);
|
||||
setRemoteAddr(new InetSocketAddress(remoteHost, remotePort));
|
||||
}
|
||||
}
|
||||
|
||||
private static class NoOpFilterConfig implements FilterConfig
|
||||
{
|
||||
@Override
|
||||
public String getFilterName()
|
||||
{
|
||||
return "noop";
|
||||
}
|
||||
|
||||
@Override
|
||||
public ServletContext getServletContext()
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getInitParameter(String name)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Enumeration<String> getInitParameterNames()
|
||||
{
|
||||
return Collections.emptyEnumeration();
|
||||
}
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception
|
||||
{
|
||||
startServer(DoSFilter.class);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemotePortLoadIdCreation_ipv6() throws ServletException {
|
||||
final ServletRequest request = new RemoteAddressRequest("::192.9.5.5", 12345);
|
||||
DoSFilter doSFilter = new DoSFilter();
|
||||
doSFilter.init(new NoOpFilterConfig());
|
||||
doSFilter.setRemotePort(true);
|
||||
|
||||
try
|
||||
{
|
||||
RateTracker tracker = doSFilter.getRateTracker(request);
|
||||
assertThat("tracker.id", tracker.getId(),
|
||||
anyOf(
|
||||
is("[::192.9.5.5]:12345"), // short form
|
||||
is("[0:0:0:0:0:0:c009:505]:12345") // long form
|
||||
));
|
||||
}
|
||||
finally
|
||||
{
|
||||
doSFilter.stopScheduler();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRemotePortLoadIdCreation_ipv4() throws ServletException {
|
||||
final ServletRequest request = new RemoteAddressRequest("127.0.0.1", 12345);
|
||||
DoSFilter doSFilter = new DoSFilter();
|
||||
doSFilter.init(new NoOpFilterConfig());
|
||||
doSFilter.setRemotePort(true);
|
||||
|
||||
try
|
||||
{
|
||||
RateTracker tracker = doSFilter.getRateTracker(request);
|
||||
assertThat("tracker.id", tracker.getId(), is("127.0.0.1:12345"));
|
||||
}
|
||||
finally
|
||||
{
|
||||
doSFilter.stopScheduler();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRateIsRateExceeded() throws InterruptedException
|
||||
{
|
||||
|
@ -70,7 +158,7 @@ public class DoSFilterTest extends AbstractDoSFilterTest
|
|||
{
|
||||
String last="GET /ctx/timeout/?sleep="+2*_requestMaxTime+" HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n";
|
||||
String responses = doRequests("",0,0,0,last);
|
||||
Assert.assertThat(responses, Matchers.containsString(" 503 "));
|
||||
assertThat(responses, Matchers.containsString(" 503 "));
|
||||
}
|
||||
|
||||
private boolean hitRateTracker(DoSFilter doSFilter, int sleep) throws InterruptedException
|
||||
|
|
|
@ -20,7 +20,6 @@ package org.eclipse.jetty.start;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.nio.file.CopyOption;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
|
@ -30,12 +29,11 @@ import java.util.Set;
|
|||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import org.eclipse.jetty.start.Props.Prop;
|
||||
import org.eclipse.jetty.start.builders.StartDirBuilder;
|
||||
import org.eclipse.jetty.start.builders.StartIniBuilder;
|
||||
import org.eclipse.jetty.start.fileinits.BaseHomeFileInitializer;
|
||||
import org.eclipse.jetty.start.fileinits.MavenLocalRepoFileInitializer;
|
||||
import org.eclipse.jetty.start.fileinits.LocalFileInitializer;
|
||||
import org.eclipse.jetty.start.fileinits.MavenLocalRepoFileInitializer;
|
||||
import org.eclipse.jetty.start.fileinits.TestFileInitializer;
|
||||
import org.eclipse.jetty.start.fileinits.UriFileInitializer;
|
||||
|
||||
|
|
|
@ -29,7 +29,6 @@ import java.nio.file.Path;
|
|||
import java.nio.file.StandardOpenOption;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
* Interface for initializing a file resource.
|
||||
|
|
|
@ -21,8 +21,14 @@ package org.eclipse.jetty.start;
|
|||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
* Java Version Utility class.
|
||||
* <p>Parses java versions to extract a consistent set of version parts</p>
|
||||
*/
|
||||
public class JavaVersion
|
||||
{
|
||||
// Copy of code in jetty-util
|
||||
|
||||
private static final Pattern PRE_JDK9 = Pattern.compile("1\\.(\\d)(\\.(\\d+)(_(\\d+))?)?(-.+)?");
|
||||
// Regexp from JEP 223 (http://openjdk.java.net/jeps/223).
|
||||
private static final Pattern JDK9 = Pattern.compile("(\\d+)(\\.(\\d+))?(\\.(\\d+))?((-.+)?(\\+(\\d+)?(-.+)?)?)");
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
|
||||
package org.eclipse.jetty.start;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
|
|
@ -20,7 +20,6 @@ package org.eclipse.jetty.start.builders;
|
|||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
|
|
@@ -19,18 +19,12 @@
package org.eclipse.jetty.start.fileinits;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

import org.eclipse.jetty.start.BaseHome;
import org.eclipse.jetty.start.FS;
import org.eclipse.jetty.start.FileInitializer;
import org.eclipse.jetty.start.StartLog;

public class UriFileInitializer extends FileInitializer
{
@@ -28,9 +28,9 @@ import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

import org.eclipse.jetty.toolchain.test.IO;
import org.eclipse.jetty.toolchain.test.MavenTestingUtils;
import org.eclipse.jetty.toolchain.test.TestTracker;
import org.eclipse.jetty.toolchain.test.IO;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
@@ -18,6 +18,11 @@

package org.eclipse.jetty.start;

import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertThat;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;

@@ -32,11 +37,6 @@ import org.eclipse.jetty.toolchain.test.TestingDir;
import org.junit.Rule;
import org.junit.Test;

import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertThat;

public class ModulesTest
{
    private final static String TEST_SOURCE = "<test>";
@@ -0,0 +1,166 @@
//
//  ========================================================================
//  Copyright (c) 1995-2017 Mort Bay Consulting Pty. Ltd.
//  ------------------------------------------------------------------------
//  All rights reserved. This program and the accompanying materials
//  are made available under the terms of the Eclipse Public License v1.0
//  and Apache License v2.0 which accompanies this distribution.
//
//      The Eclipse Public License is available at
//      http://www.eclipse.org/legal/epl-v10.html
//
//      The Apache License v2.0 is available at
//      http://www.opensource.org/licenses/apache2.0.php
//
//  You may elect to redistribute this code under either of these licenses.
//  ========================================================================
//

package org.eclipse.jetty.util;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Java Version Utility class.
 * <p>Parses java versions to extract a consistent set of version parts</p>
 */
public class JavaVersion
{
    // Copy of version in jetty-start

    private static final Pattern PRE_JDK9 = Pattern.compile("1\\.(\\d)(\\.(\\d+)(_(\\d+))?)?(-.+)?");
    // Regexp from JEP 223 (http://openjdk.java.net/jeps/223).
    private static final Pattern JDK9 = Pattern.compile("(\\d+)(\\.(\\d+))?(\\.(\\d+))?((-.+)?(\\+(\\d+)?(-.+)?)?)");

    public static final JavaVersion VERSION = parse(System.getProperty("java.version"));

    public static JavaVersion parse(String version)
    {
        if (version.startsWith("1."))
            return parsePreJDK9(version);
        return parseJDK9(version);
    }

    private static JavaVersion parsePreJDK9(String version)
    {
        Matcher matcher = PRE_JDK9.matcher(version);
        if (!matcher.matches())
            throw new IllegalArgumentException("Invalid Java version " + version);
        int major = 1;
        int minor = Integer.parseInt(matcher.group(1));
        String microGroup = matcher.group(3);
        int micro = microGroup == null || microGroup.isEmpty() ? 0 : Integer.parseInt(microGroup);
        String updateGroup = matcher.group(5);
        int update = updateGroup == null || updateGroup.isEmpty() ? 0 : Integer.parseInt(updateGroup);
        String suffix = matcher.group(6);
        return new JavaVersion(version, minor, major, minor, micro, update, suffix);
    }

    private static JavaVersion parseJDK9(String version)
    {
        Matcher matcher = JDK9.matcher(version);
        if (!matcher.matches())
            throw new IllegalArgumentException("Invalid Java version " + version);
        int major = Integer.parseInt(matcher.group(1));
        String minorGroup = matcher.group(3);
        int minor = minorGroup == null || minorGroup.isEmpty() ? 0 : Integer.parseInt(minorGroup);
        String microGroup = matcher.group(5);
        int micro = microGroup == null || microGroup.isEmpty() ? 0 : Integer.parseInt(microGroup);
        String suffix = matcher.group(6);
        return new JavaVersion(version, major, major, minor, micro, 0, suffix);
    }

    private final String version;
    private final int platform;
    private final int major;
    private final int minor;
    private final int micro;
    private final int update;
    private final String suffix;

    private JavaVersion(String version, int platform, int major, int minor, int micro, int update, String suffix)
    {
        this.version = version;
        this.platform = platform;
        this.major = major;
        this.minor = minor;
        this.micro = micro;
        this.update = update;
        this.suffix = suffix;
    }

    /**
     * @return the string from which this JavaVersion was created
     */
    public String getVersion()
    {
        return version;
    }

    /**
     * <p>Returns the Java Platform version, such as {@code 8} for JDK 1.8.0_92 and {@code 9} for JDK 9.2.4.</p>
     *
     * @return the Java Platform version
     */
    public int getPlatform()
    {
        return platform;
    }

    /**
     * <p>Returns the major number version, such as {@code 1} for JDK 1.8.0_92 and {@code 9} for JDK 9.2.4.</p>
     *
     * @return the major number version
     */
    public int getMajor()
    {
        return major;
    }

    /**
     * <p>Returns the minor number version, such as {@code 8} for JDK 1.8.0_92 and {@code 2} for JDK 9.2.4.</p>
     *
     * @return the minor number version
     */
    public int getMinor()
    {
        return minor;
    }

    /**
     * <p>Returns the micro number version, such as {@code 0} for JDK 1.8.0_92 and {@code 4} for JDK 9.2.4.</p>
     *
     * @return the micro number version
     */
    public int getMicro()
    {
        return micro;
    }

    /**
     * <p>Returns the update number version, such as {@code 92} for JDK 1.8.0_92 and {@code 0} for JDK 9.2.4.</p>
     *
     * @return the update number version
     */
    public int getUpdate()
    {
        return update;
    }

    /**
     * <p>Returns the remaining string after the version numbers, such as {@code -internal} for
     * JDK 1.8.0_92-internal and {@code -ea} for JDK 9-ea, or {@code +13} for JDK 9.2.4+13.</p>
     *
     * @return the remaining string after the version numbers
     */
    public String getSuffix()
    {
        return suffix;
    }

    public String toString()
    {
        return version;
    }
}
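Note: the following is an illustrative usage sketch, not part of this commit. It shows how the new JavaVersion parser splits the two version formats; the class name JavaVersionDemo is hypothetical.

// Illustrative sketch only: exercising the new org.eclipse.jetty.util.JavaVersion parser.
import org.eclipse.jetty.util.JavaVersion;

public class JavaVersionDemo
{
    public static void main(String[] args)
    {
        // Pre-JEP-223 format: platform is taken from the minor digit (8), update from _92.
        JavaVersion v8 = JavaVersion.parse("1.8.0_92");
        System.out.println(v8.getPlatform() + " " + v8.getMajor() + "." + v8.getMinor() + "." + v8.getMicro() + "_" + v8.getUpdate());
        // prints: 8 1.8.0_92

        // JEP-223 format: major and platform are the same number (9), update is always 0.
        JavaVersion v9 = JavaVersion.parse("9.2.4+13");
        System.out.println(v9.getPlatform() + " " + v9.getMajor() + "." + v9.getMinor() + "." + v9.getMicro() + " suffix=" + v9.getSuffix());
        // prints: 9 9.2.4 suffix=+13
    }
}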
File diff suppressed because it is too large
@@ -1056,5 +1056,4 @@ public class StringUtil
{
return object==null?null:String.valueOf(object);
}

}
@@ -22,6 +22,8 @@ import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.locks.Condition;

import org.eclipse.jetty.util.annotation.ManagedAttribute;
import org.eclipse.jetty.util.annotation.ManagedObject;
import org.eclipse.jetty.util.component.AbstractLifeCycle;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
@@ -32,17 +34,18 @@ import org.eclipse.jetty.util.log.Logger;
 * with a Thread immediately being assigned the Runnable task, or fail if no Thread is
 * available. Threads are preallocated up to the capacity from a wrapped {@link Executor}.
 */
@ManagedObject("A pool for reserved threads")
public class ReservedThreadExecutor extends AbstractLifeCycle implements Executor
{
    private static final Logger LOG = Log.getLogger(ReservedThreadExecutor.class);

    private final Executor _executor;
    private final Locker _locker = new Locker();
    private final ReservedThread[] _queue;
    private int _head;
    private int _size;
    private int _pending;

    public ReservedThreadExecutor(Executor executor)
    {
        this(executor,1);
@@ -51,27 +54,27 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executor
/**
 * @param executor The executor to use to obtain threads
 * @param capacity The number of threads to preallocate. If less than 0 then capacity
 * is calculated based on a heuristic from the number of available processors and
 * is calculated based on a heuristic from the number of available processors and
 * thread pool size.
 */
public ReservedThreadExecutor(Executor executor,int capacity)
{
_executor = executor;

if (capacity < 0)
{
if (executor instanceof ThreadPool)
int cpus = Runtime.getRuntime().availableProcessors();
if (executor instanceof ThreadPool.SizedThreadPool)
{
int threads = ((ThreadPool)executor).getThreads();
int cpus = Runtime.getRuntime().availableProcessors();
capacity = Math.max(1,Math.min(cpus,threads/8));
int threads = ((ThreadPool.SizedThreadPool)executor).getMaxThreads();
capacity = Math.max(1, Math.min(cpus, threads / 8));
}
else
{
capacity = Runtime.getRuntime().availableProcessors();
capacity = cpus;
}
}

_queue = new ReservedThread[capacity];
}
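Note: a minimal re-statement (not the committed code) of the default-capacity heuristic used above when a negative capacity is passed; the helper name and its nullable maxThreads parameter are hypothetical.

// Sketch of the default-capacity heuristic: at least 1, at most one reserved
// thread per 8 pool threads, never more than the number of processors.
public final class ReservedCapacityHeuristic
{
    static int defaultCapacity(int cpus, Integer maxThreads)
    {
        if (maxThreads != null)
            // SizedThreadPool case
            return Math.max(1, Math.min(cpus, maxThreads / 8));
        // Plain Executor case: one reserved thread per processor
        return cpus;
    }

    public static void main(String[] args)
    {
        System.out.println(defaultCapacity(8, 200));  // 8  (200/8=25, capped at cpus)
        System.out.println(defaultCapacity(8, 16));   // 2  (16/8)
        System.out.println(defaultCapacity(4, null)); // 4  (no sized pool)
    }
}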
@ -79,34 +82,31 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executo
|
|||
{
|
||||
return _executor;
|
||||
}
|
||||
|
||||
|
||||
@ManagedAttribute(value = "max number of reserved threads", readonly = true)
|
||||
public int getCapacity()
|
||||
{
|
||||
return _queue.length;
|
||||
}
|
||||
|
||||
public int getPreallocated()
|
||||
|
||||
@ManagedAttribute(value = "available reserved threads", readonly = true)
|
||||
public int getAvailable()
|
||||
{
|
||||
try (Locker.Lock lock = _locker.lock())
|
||||
{
|
||||
return _size;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doStart() throws Exception
|
||||
|
||||
@ManagedAttribute(value = "pending reserved threads", readonly = true)
|
||||
public int getPending()
|
||||
{
|
||||
try (Locker.Lock lock = _locker.lock())
|
||||
{
|
||||
_head = _size = _pending = 0;
|
||||
while (_pending<_queue.length)
|
||||
{
|
||||
_executor.execute(new ReservedThread());
|
||||
_pending++;
|
||||
}
|
||||
return _pending;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void doStop() throws Exception
|
||||
{
|
||||
|
@ -121,15 +121,15 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executo
|
|||
thread._wakeup.signal();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute(Runnable task) throws RejectedExecutionException
|
||||
{
|
||||
if (!tryExecute(task))
|
||||
throw new RejectedExecutionException();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @param task The task to run
|
||||
* @return True iff a reserved thread was available and has been assigned the task to run.
|
||||
|
@ -138,7 +138,7 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executo
|
|||
{
|
||||
if (task==null)
|
||||
return false;
|
||||
|
||||
|
||||
try (Locker.Lock lock = _locker.lock())
|
||||
{
|
||||
if (_size==0)
|
||||
|
@ -150,21 +150,21 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executo
|
|||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
ReservedThread thread = _queue[_head];
|
||||
_queue[_head] = null;
|
||||
_head = (_head+1)%_queue.length;
|
||||
_size--;
|
||||
|
||||
|
||||
if (_size==0 && _pending<_queue.length)
|
||||
{
|
||||
_executor.execute(new ReservedThread());
|
||||
_pending++;
|
||||
}
|
||||
|
||||
|
||||
thread._task = task;
|
||||
thread._wakeup.signal();
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
catch(RejectedExecutionException e)
|
||||
|
@ -174,23 +174,31 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executo
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
try (Locker.Lock lock = _locker.lock())
|
||||
{
|
||||
return String.format("%s{s=%d,p=%d}",super.toString(),_size,_pending);
|
||||
}
|
||||
}
|
||||
|
||||
private class ReservedThread implements Runnable
|
||||
{
|
||||
private Condition _wakeup = null;
|
||||
private Runnable _task = null;
|
||||
|
||||
|
||||
private void reservedWait() throws InterruptedException
|
||||
{
|
||||
_wakeup.await();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void run()
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
Runnable task = null;
|
||||
|
||||
try (Locker.Lock lock = _locker.lock())
|
||||
{
|
||||
// if this is our first loop, decrement pending count
|
||||
|
@ -199,20 +207,24 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executo
|
|||
_pending--;
|
||||
_wakeup = _locker.newCondition();
|
||||
}
|
||||
|
||||
|
||||
// Exit if no longer running or there now too many preallocated threads
|
||||
if (!isRunning() || _size>=_queue.length)
|
||||
break;
|
||||
|
||||
|
||||
// Insert ourselves in the queue
|
||||
_queue[(_head+_size++)%_queue.length] = this;
|
||||
|
||||
// Wait for a task, ignoring spurious interrupts
|
||||
do
|
||||
// Wait for a task, ignoring spurious wakeups
|
||||
while (isRunning() && task==null)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("{} waiting", this);
|
||||
reservedWait();
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("{} woken up", this);
|
||||
task = _task;
|
||||
_task = null;
|
||||
}
|
||||
|
@ -221,7 +233,6 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executo
|
|||
LOG.ignore(e);
|
||||
}
|
||||
}
|
||||
while (isRunning() && task==null);
|
||||
}
|
||||
|
||||
// Run any task
|
||||
|
@@ -231,22 +242,12 @@ public class ReservedThreadExecutor extends AbstractLifeCycle implements Executor
{
task.run();
}
catch (Exception e)
catch (Throwable e)
{
LOG.warn(e);
break;
}
}
}
}
}

@Override
public String toString()
{
try (Locker.Lock lock = _locker.lock())
{
return String.format("%s{s=%d,p=%d}",super.toString(),_size,_pending);
}
}
}
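Note: a usage sketch (not from this diff) of the reworked ReservedThreadExecutor API, assuming a QueuedThreadPool as the wrapped executor.

// Sketch: tryExecute only succeeds if a reserved thread is parked and available;
// otherwise the caller falls back to the normal executor.
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ReservedThreadExecutor;

public class ReservedThreadExecutorSketch
{
    public static void main(String[] args) throws Exception
    {
        QueuedThreadPool pool = new QueuedThreadPool(16);
        pool.start();

        // Negative capacity triggers the cpus/maxThreads heuristic from the constructor above.
        ReservedThreadExecutor reserved = new ReservedThreadExecutor(pool, -1);
        reserved.start();

        Runnable task = () -> System.out.println("ran on " + Thread.currentThread().getName());
        if (!reserved.tryExecute(task))
            pool.execute(task);

        System.out.println("available=" + reserved.getAvailable() + " pending=" + reserved.getPending());

        reserved.stop();
        pool.stop();
    }
}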
@ -21,7 +21,11 @@ package org.eclipse.jetty.util.thread.strategy;
|
|||
import java.io.Closeable;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.RejectedExecutionException;
|
||||
import java.util.concurrent.atomic.LongAdder;
|
||||
|
||||
import org.eclipse.jetty.util.annotation.ManagedAttribute;
|
||||
import org.eclipse.jetty.util.annotation.ManagedObject;
|
||||
import org.eclipse.jetty.util.annotation.ManagedOperation;
|
||||
import org.eclipse.jetty.util.component.ContainerLifeCycle;
|
||||
import org.eclipse.jetty.util.log.Log;
|
||||
import org.eclipse.jetty.util.log.Logger;
|
||||
|
@ -57,6 +61,7 @@ import org.eclipse.jetty.util.thread.ReservedThreadExecutor;
|
|||
* sub-strategy is called ProduceExecuteConsume (PEC).
|
||||
* </p>
|
||||
*/
|
||||
@ManagedObject("eat what you kill execution strategy")
|
||||
public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrategy, Runnable
|
||||
{
|
||||
private static final Logger LOG = Log.getLogger(EatWhatYouKill.class);
|
||||
|
@ -64,19 +69,22 @@ public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrat
|
|||
private enum State { IDLE, PRODUCING, REPRODUCING }
|
||||
|
||||
private final Locker _locker = new Locker();
|
||||
private State _state = State.IDLE;
|
||||
private final LongAdder _nonBlocking = new LongAdder();
|
||||
private final LongAdder _blocking = new LongAdder();
|
||||
private final LongAdder _executed = new LongAdder();
|
||||
private final Producer _producer;
|
||||
private final Executor _executor;
|
||||
private final ReservedThreadExecutor _producers;
|
||||
private State _state = State.IDLE;
|
||||
|
||||
public EatWhatYouKill(Producer producer, Executor executor)
|
||||
{
|
||||
this(producer,executor,new ReservedThreadExecutor(executor,1));
|
||||
}
|
||||
|
||||
public EatWhatYouKill(Producer producer, Executor executor, int maxProducersPending)
|
||||
public EatWhatYouKill(Producer producer, Executor executor, int maxReserved)
|
||||
{
|
||||
this(producer,executor,new ReservedThreadExecutor(executor,maxProducersPending));
|
||||
this(producer,executor,new ReservedThreadExecutor(executor,maxReserved));
|
||||
}
|
||||
|
||||
public EatWhatYouKill(Producer producer, Executor executor, ReservedThreadExecutor producers)
|
||||
|
@ -184,7 +192,9 @@ public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrat
|
|||
{
|
||||
// Could another one just have been queued with a produce call?
|
||||
if (_state==State.REPRODUCING)
|
||||
{
|
||||
_state = State.PRODUCING;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (LOG.isDebugEnabled())
|
||||
|
@ -194,36 +204,42 @@ public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrat
|
|||
}
|
||||
}
|
||||
}
|
||||
else if (Invocable.getInvocationType(task)==InvocationType.NON_BLOCKING)
|
||||
{
|
||||
// PRODUCE CONSUME (EWYK!)
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("{} PC t={}",this,task);
|
||||
task.run();
|
||||
}
|
||||
else
|
||||
{
|
||||
boolean consume;
|
||||
try (Lock locked = _locker.lock())
|
||||
if (Invocable.getInvocationType(task) == InvocationType.NON_BLOCKING)
|
||||
{
|
||||
if (_producers.tryExecute(this))
|
||||
// PRODUCE CONSUME (EWYK!)
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("{} PC t={}", this, task);
|
||||
consume = true;
|
||||
_nonBlocking.increment();
|
||||
}
|
||||
else
|
||||
{
|
||||
try (Lock locked = _locker.lock())
|
||||
{
|
||||
// EXECUTE PRODUCE CONSUME!
|
||||
// We have executed a new Producer, so we can EWYK consume
|
||||
_state = State.IDLE;
|
||||
producing = false;
|
||||
consume = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
// PRODUCE EXECUTE CONSUME!
|
||||
consume = false;
|
||||
if (_producers.tryExecute(this))
|
||||
{
|
||||
// EXECUTE PRODUCE CONSUME!
|
||||
// We have executed a new Producer, so we can EWYK consume
|
||||
_state = State.IDLE;
|
||||
producing = false;
|
||||
consume = true;
|
||||
_blocking.increment();
|
||||
}
|
||||
else
|
||||
{
|
||||
// PRODUCE EXECUTE CONSUME!
|
||||
consume = false;
|
||||
_executed.increment();
|
||||
}
|
||||
}
|
||||
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("{} {} t={}", this, consume ? "EPC" : "PEC", task);
|
||||
}
|
||||
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug("{} {} t={}",this,consume?"EPC":"PEC",task);
|
||||
|
||||
// Consume or execute task
|
||||
try
|
||||
{
|
||||
|
@ -232,7 +248,7 @@ public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrat
|
|||
else
|
||||
_executor.execute(task);
|
||||
}
|
||||
catch(RejectedExecutionException e)
|
||||
catch (RejectedExecutionException e)
|
||||
{
|
||||
LOG.warn(e);
|
||||
if (task instanceof Closeable)
|
||||
|
@ -241,13 +257,13 @@ public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrat
|
|||
{
|
||||
((Closeable)task).close();
|
||||
}
|
||||
catch(Throwable e2)
|
||||
catch (Throwable e2)
|
||||
{
|
||||
LOG.ignore(e2);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch(Throwable e)
|
||||
catch (Throwable e)
|
||||
{
|
||||
LOG.warn(e);
|
||||
}
|
||||
|
@@ -257,7 +273,26 @@ public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrategy
return producing;
}

public Boolean isIdle()
@ManagedAttribute(value = "number of non blocking tasks consumed", readonly = true)
public long getNonBlockingTasksConsumed()
{
return _nonBlocking.longValue();
}

@ManagedAttribute(value = "number of blocking tasks consumed", readonly = true)
public long getBlockingTasksConsumed()
{
return _blocking.longValue();
}

@ManagedAttribute(value = "number of blocking tasks executed", readonly = true)
public long getBlockingTasksExecuted()
{
return _executed.longValue();
}

@ManagedAttribute(value = "whether this execution strategy is idle", readonly = true)
public boolean isIdle()
{
try (Lock locked = _locker.lock())
{

@@ -265,6 +300,14 @@ public class EatWhatYouKill extends ContainerLifeCycle implements ExecutionStrategy
}
}

@ManagedOperation(value = "resets the task counts", impact = "ACTION")
public void reset()
{
_nonBlocking.reset();
_blocking.reset();
_executed.reset();
}

public String toString()
{
try (Lock locked = _locker.lock())
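Note: a sketch (not from this diff) of wiring up EatWhatYouKill and reading the new counters; it assumes ExecutionStrategy.Producer exposes a single produce() method, as in Jetty's ExecutionStrategy.

// Sketch: the three dispatch modes tracked above are PC (non-blocking task run
// directly), EPC (consumed after handing production to a reserved thread) and
// PEC (task handed to the executor).
import java.util.concurrent.ConcurrentLinkedQueue;

import org.eclipse.jetty.util.thread.ExecutionStrategy;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.strategy.EatWhatYouKill;

public class EatWhatYouKillSketch
{
    public static void main(String[] args) throws Exception
    {
        ConcurrentLinkedQueue<Runnable> tasks = new ConcurrentLinkedQueue<>();
        tasks.add(() -> System.out.println("task 1"));
        tasks.add(() -> System.out.println("task 2"));

        // Producer hands out tasks until the queue drains (produce() returning null means idle).
        ExecutionStrategy.Producer producer = tasks::poll;

        QueuedThreadPool pool = new QueuedThreadPool(8);
        pool.start();

        EatWhatYouKill strategy = new EatWhatYouKill(producer, pool, 1);
        strategy.start();
        strategy.produce();

        System.out.println("PC="  + strategy.getNonBlockingTasksConsumed());
        System.out.println("EPC=" + strategy.getBlockingTasksConsumed());
        System.out.println("PEC=" + strategy.getBlockingTasksExecuted());

        strategy.stop();
        pool.stop();
    }
}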
@@ -18,9 +18,14 @@

package org.eclipse.jetty.util;

import static org.eclipse.jetty.util.PathWatcher.PathWatchEventType.*;
import static org.hamcrest.Matchers.*;
import static org.junit.Assert.*;
import static org.eclipse.jetty.util.PathWatcher.PathWatchEventType.ADDED;
import static org.eclipse.jetty.util.PathWatcher.PathWatchEventType.DELETED;
import static org.eclipse.jetty.util.PathWatcher.PathWatchEventType.MODIFIED;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.FileOutputStream;

@@ -28,6 +33,9 @@ import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.DosFileAttributes;
import java.nio.file.attribute.FileTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;

@@ -36,19 +44,37 @@ import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.toolchain.test.AdvancedRunner;
import org.eclipse.jetty.toolchain.test.OS;
import org.eclipse.jetty.toolchain.test.TestingDir;
import org.eclipse.jetty.util.PathWatcher.PathWatchEvent;
import org.eclipse.jetty.util.PathWatcher.PathWatchEventType;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.junit.Ignore;
import org.junit.Assume;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;

@Ignore("Disabled due to behavioral differences in various FileSystems (hard to write a single testcase that works in all scenarios)")
@RunWith(AdvancedRunner.class)
public class PathWatcherTest
{
    public static final int QUIET_TIME;
    public static final int WAIT_TIME;
    public static final int LONG_TIME;

    static
    {
        if (OS.IS_LINUX)
            QUIET_TIME = 300;
        else if (OS.IS_OSX)
            QUIET_TIME = 5000;
        else
            QUIET_TIME = 1000;
        WAIT_TIME = 2 * QUIET_TIME;
        LONG_TIME = 5 * QUIET_TIME;
    }
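Note: a sketch (not from this diff) of the basic PathWatcher wiring these tests exercise; the watched directory and quiet time are hypothetical values.

// Sketch: events are only delivered once a path has been "quiet" (no further
// changes) for the configured quiet time, which is why the tests scale it per OS.
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.util.PathWatcher;

public class PathWatcherSketch
{
    public static void main(String[] args) throws Exception
    {
        Path dir = Paths.get("/tmp/watched");   // hypothetical directory

        PathWatcher watcher = new PathWatcher();
        watcher.setUpdateQuietTime(1000, TimeUnit.MILLISECONDS);

        watcher.addListener((PathWatcher.Listener)event ->
            System.out.println(event.getType() + " " + event.getPath()));

        PathWatcher.Config config = new PathWatcher.Config(dir);
        config.setRecurseDepth(2);
        config.addIncludeGlobRelative("*.war");
        watcher.watch(config);

        watcher.start();
        // ... create or modify files under the watched directory to see ADDED / MODIFIED / DELETED ...
        Thread.sleep(10_000);
        watcher.stop();
    }
}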
|
||||
|
||||
public static class PathWatchEventCapture implements PathWatcher.Listener
|
||||
{
|
||||
public final static String FINISH_TAG = "#finished#.tag";
|
||||
|
@ -76,27 +102,17 @@ public class PathWatcherTest
|
|||
events.clear();
|
||||
}
|
||||
|
||||
public void reset(int count)
|
||||
{
|
||||
setFinishTrigger(count);
|
||||
events.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPathWatchEvent(PathWatchEvent event)
|
||||
{
|
||||
synchronized (events)
|
||||
{
|
||||
//if triggered by path
|
||||
if (triggerPath != null)
|
||||
{
|
||||
|
||||
if (triggerPath.equals(event.getPath()) && (event.getType() == triggerType))
|
||||
{
|
||||
LOG.debug("Encountered finish trigger: {} on {}",event.getType(),event.getPath());
|
||||
finishedLatch.countDown();
|
||||
}
|
||||
}
|
||||
else if (finishedLatch != null)
|
||||
{
|
||||
finishedLatch.countDown();
|
||||
}
|
||||
|
||||
|
||||
Path relativePath = this.baseDir.relativize(event.getPath());
|
||||
String key = relativePath.toString().replace(File.separatorChar,'/');
|
||||
|
||||
|
@ -109,6 +125,20 @@ public class PathWatcherTest
|
|||
this.events.put(key,types);
|
||||
LOG.debug("Captured Event: {} | {}",event.getType(),key);
|
||||
}
|
||||
//if triggered by path
|
||||
if (triggerPath != null)
|
||||
{
|
||||
|
||||
if (triggerPath.equals(event.getPath()) && (event.getType() == triggerType))
|
||||
{
|
||||
LOG.debug("Encountered finish trigger: {} on {}",event.getType(),event.getPath());
|
||||
finishedLatch.countDown();
|
||||
}
|
||||
}
|
||||
else if (finishedLatch != null)
|
||||
{
|
||||
finishedLatch.countDown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -122,13 +152,21 @@ public class PathWatcherTest
|
|||
*/
|
||||
public void assertEvents(Map<String, PathWatchEventType[]> expectedEvents)
|
||||
{
|
||||
assertThat("Event match (file|diretory) count",this.events.size(),is(expectedEvents.size()));
|
||||
|
||||
for (Map.Entry<String, PathWatchEventType[]> entry : expectedEvents.entrySet())
|
||||
try
|
||||
{
|
||||
String relativePath = entry.getKey();
|
||||
PathWatchEventType[] expectedTypes = entry.getValue();
|
||||
assertEvents(relativePath,expectedTypes);
|
||||
assertThat("Event match (file|directory) count", this.events.size(), is(expectedEvents.size()));
|
||||
|
||||
for (Map.Entry<String, PathWatchEventType[]> entry : expectedEvents.entrySet())
|
||||
{
|
||||
String relativePath = entry.getKey();
|
||||
PathWatchEventType[] expectedTypes = entry.getValue();
|
||||
assertEvents(relativePath, expectedTypes);
|
||||
}
|
||||
}
|
||||
catch(Throwable th)
|
||||
{
|
||||
System.err.println(this.events);
|
||||
throw th;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -175,7 +213,7 @@ public class PathWatcherTest
|
|||
latchCount = count;
|
||||
finishedLatch = new CountDownLatch(latchCount);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Await the countdown latch on the finish trigger
|
||||
*
|
||||
|
@ -197,6 +235,12 @@ public class PathWatcherTest
|
|||
assertThat("Timed Out (" + awaitMillis + "ms) waiting for capture to finish",finishedLatch.await(awaitMillis,TimeUnit.MILLISECONDS),is(true));
|
||||
LOG.debug("Finished capture");
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString()
|
||||
{
|
||||
return events.toString();
|
||||
}
|
||||
}
|
||||
|
||||
private static void updateFile(Path path, String newContents) throws IOException
|
||||
|
@ -228,39 +272,37 @@ public class PathWatcherTest
|
|||
* @throws InterruptedException
|
||||
* if sleep between writes was interrupted
|
||||
*/
|
||||
private void updateFileOverTime(Path path, int fileSize, int timeDuration, TimeUnit timeUnit) throws IOException, InterruptedException
|
||||
private void updateFileOverTime(Path path, int timeDuration, TimeUnit timeUnit)
|
||||
{
|
||||
// how long to sleep between writes
|
||||
int sleepMs = 100;
|
||||
|
||||
// how many millis to spend writing entire file size
|
||||
long totalMs = timeUnit.toMillis(timeDuration);
|
||||
|
||||
// how many write chunks to write
|
||||
int writeCount = (int)((int)totalMs / (int)sleepMs);
|
||||
|
||||
// average chunk buffer
|
||||
int chunkBufLen = fileSize / writeCount;
|
||||
byte chunkBuf[] = new byte[chunkBufLen];
|
||||
Arrays.fill(chunkBuf,(byte)'x');
|
||||
|
||||
try (FileOutputStream out = new FileOutputStream(path.toFile()))
|
||||
try
|
||||
{
|
||||
int left = fileSize;
|
||||
// how long to sleep between writes
|
||||
int sleepMs = 200;
|
||||
|
||||
while (left > 0)
|
||||
// average chunk buffer
|
||||
int chunkBufLen = 16;
|
||||
byte chunkBuf[] = new byte[chunkBufLen];
|
||||
Arrays.fill(chunkBuf, (byte)'x');
|
||||
long end = System.nanoTime() + timeUnit.toNanos(timeDuration);
|
||||
|
||||
try (FileOutputStream out = new FileOutputStream(path.toFile()))
|
||||
{
|
||||
int len = Math.min(left,chunkBufLen);
|
||||
out.write(chunkBuf,0,len);
|
||||
left -= chunkBufLen;
|
||||
out.flush();
|
||||
out.getChannel().force(true);
|
||||
// Force file to actually write to disk.
|
||||
// Skipping any sort of filesystem caching of the write
|
||||
out.getFD().sync();
|
||||
TimeUnit.MILLISECONDS.sleep(sleepMs);
|
||||
while(System.nanoTime()<end)
|
||||
{
|
||||
out.write(chunkBuf);
|
||||
out.flush();
|
||||
out.getChannel().force(true);
|
||||
// Force file to actually write to disk.
|
||||
// Skipping any sort of filesystem caching of the write
|
||||
out.getFD().sync();
|
||||
TimeUnit.MILLISECONDS.sleep(sleepMs);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -273,13 +315,7 @@ public class PathWatcherTest
|
|||
*/
|
||||
private static void awaitQuietTime(PathWatcher pathWatcher) throws InterruptedException
|
||||
{
|
||||
double multiplier = 5.0;
|
||||
if (OS.IS_WINDOWS)
|
||||
{
|
||||
// Microsoft Windows filesystem is too slow for a lower multiplier
|
||||
multiplier = 6.0;
|
||||
}
|
||||
TimeUnit.MILLISECONDS.sleep((long)((double)pathWatcher.getUpdateQuietTimeMillis() * multiplier));
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
}
|
||||
|
||||
private static final int KB = 1024;
|
||||
|
@ -288,74 +324,126 @@ public class PathWatcherTest
|
|||
@Rule
|
||||
public TestingDir testdir = new TestingDir();
|
||||
|
||||
|
||||
@Test
|
||||
public void testConfig_ShouldRecurse_0() throws IOException
|
||||
public void testSequence() throws Exception
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
|
||||
// Create a few directories
|
||||
Files.createDirectories(dir.resolve("a/b/c/d"));
|
||||
// Files we are interested in
|
||||
Files.createFile(dir.resolve("file0"));
|
||||
Files.createDirectories(dir.resolve("subdir0/subsubdir0"));
|
||||
Files.createFile(dir.resolve("subdir0/fileA"));
|
||||
Files.createFile(dir.resolve("subdir0/subsubdir0/unseen"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
pathWatcher.addListener(capture);
|
||||
|
||||
// Add test dir configuration
|
||||
PathWatcher.Config config = new PathWatcher.Config(dir);
|
||||
|
||||
config.setRecurseDepth(0);
|
||||
assertThat("Config.recurse[0].shouldRecurse[./a/b]",config.shouldRecurseDirectory(dir.resolve("a/b")),is(false));
|
||||
assertThat("Config.recurse[0].shouldRecurse[./a]",config.shouldRecurseDirectory(dir.resolve("a")),is(false));
|
||||
assertThat("Config.recurse[0].shouldRecurse[./]",config.shouldRecurseDirectory(dir),is(false));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConfig_ShouldRecurse_1() throws IOException
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
|
||||
// Create a few directories
|
||||
Files.createDirectories(dir.resolve("a/b/c/d"));
|
||||
|
||||
PathWatcher.Config config = new PathWatcher.Config(dir);
|
||||
|
||||
config.setRecurseDepth(1);
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b]",config.shouldRecurseDirectory(dir.resolve("a/b")),is(false));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a]",config.shouldRecurseDirectory(dir.resolve("a")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./]",config.shouldRecurseDirectory(dir),is(true));
|
||||
}
|
||||
pathWatcher.watch(config);
|
||||
|
||||
@Test
|
||||
public void testConfig_ShouldRecurse_2() throws IOException
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
try
|
||||
{
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
// Create a few directories
|
||||
Files.createDirectories(dir.resolve("a/b/c/d"));
|
||||
// Check initial scan events
|
||||
capture.setFinishTrigger(4);
|
||||
pathWatcher.start();
|
||||
expected.put("file0",new PathWatchEventType[] { ADDED });
|
||||
expected.put("subdir0",new PathWatchEventType[] { ADDED });
|
||||
expected.put("subdir0/fileA",new PathWatchEventType[] { ADDED });
|
||||
expected.put("subdir0/subsubdir0",new PathWatchEventType[] { ADDED });
|
||||
|
||||
PathWatcher.Config config = new PathWatcher.Config(dir);
|
||||
capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS);
|
||||
capture.assertEvents(expected);
|
||||
Thread.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
|
||||
config.setRecurseDepth(2);
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b/c]",config.shouldRecurseDirectory(dir.resolve("a/b/c")),is(false));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b]",config.shouldRecurseDirectory(dir.resolve("a/b")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a]",config.shouldRecurseDirectory(dir.resolve("a")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./]",config.shouldRecurseDirectory(dir),is(true));
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testConfig_ShouldRecurse_3() throws IOException
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
|
||||
//Create some deep dirs
|
||||
Files.createDirectories(dir.resolve("a/b/c/d/e/f/g"));
|
||||
|
||||
PathWatcher.Config config = new PathWatcher.Config(dir);
|
||||
config.setRecurseDepth(PathWatcher.Config.UNLIMITED_DEPTH);
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b/c/d/g]",config.shouldRecurseDirectory(dir.resolve("a/b/c/d/g")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b/c/d/f]",config.shouldRecurseDirectory(dir.resolve("a/b/c/d/f")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b/c/d/e]",config.shouldRecurseDirectory(dir.resolve("a/b/c/d/e")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b/c/d]",config.shouldRecurseDirectory(dir.resolve("a/b/c/d")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b/c]",config.shouldRecurseDirectory(dir.resolve("a/b/c")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a/b]",config.shouldRecurseDirectory(dir.resolve("a/b")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./a]",config.shouldRecurseDirectory(dir.resolve("a")),is(true));
|
||||
assertThat("Config.recurse[1].shouldRecurse[./]",config.shouldRecurseDirectory(dir),is(true));
|
||||
// Check adding files
|
||||
capture.reset(3);
|
||||
expected.clear();
|
||||
Files.createFile(dir.resolve("subdir0/subsubdir0/toodeep"));
|
||||
expected.put("subdir0/subsubdir0",new PathWatchEventType[] { MODIFIED });
|
||||
Files.createFile(dir.resolve("file1"));
|
||||
expected.put("file1",new PathWatchEventType[] { ADDED });
|
||||
Files.createFile(dir.resolve("subdir0/fileB"));
|
||||
expected.put("subdir0/fileB",new PathWatchEventType[] { ADDED });
|
||||
|
||||
capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS);
|
||||
capture.assertEvents(expected);
|
||||
Thread.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
|
||||
// Check slow modification
|
||||
capture.reset(1);
|
||||
expected.clear();
|
||||
long start = System.nanoTime();
|
||||
new Thread(()->{updateFileOverTime(dir.resolve("file1"),2*QUIET_TIME,TimeUnit.MILLISECONDS);}).start();
|
||||
expected.put("file1",new PathWatchEventType[] { MODIFIED });
|
||||
capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS);
|
||||
long end = System.nanoTime();
|
||||
capture.assertEvents(expected);
|
||||
assertThat(end-start,greaterThan(TimeUnit.MILLISECONDS.toNanos(2*QUIET_TIME)));
|
||||
Thread.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
|
||||
// Check slow add
|
||||
capture.reset(1);
|
||||
expected.clear();
|
||||
start = System.nanoTime();
|
||||
new Thread(()->{updateFileOverTime(dir.resolve("file2"),2*QUIET_TIME,TimeUnit.MILLISECONDS);}).start();
|
||||
expected.put("file2",new PathWatchEventType[] { ADDED });
|
||||
capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS);
|
||||
end = System.nanoTime();
|
||||
capture.assertEvents(expected);
|
||||
assertThat(end-start,greaterThan(TimeUnit.MILLISECONDS.toNanos(2*QUIET_TIME)));
|
||||
Thread.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
|
||||
// Check move directory
|
||||
if (OS.IS_LINUX)
|
||||
{
|
||||
capture.reset(5);
|
||||
expected.clear();
|
||||
Files.move(dir.resolve("subdir0"), dir.resolve("subdir1"), StandardCopyOption.ATOMIC_MOVE);
|
||||
expected.put("subdir0", new PathWatchEventType[]{DELETED});
|
||||
// TODO expected.put("subdir0/fileA",new PathWatchEventType[] { DELETED });
|
||||
// TODO expected.put("subdir0/subsubdir0",new PathWatchEventType[] { DELETED });
|
||||
expected.put("subdir1", new PathWatchEventType[]{ADDED});
|
||||
expected.put("subdir1/fileA", new PathWatchEventType[]{ADDED});
|
||||
expected.put("subdir1/fileB", new PathWatchEventType[]{ADDED});
|
||||
expected.put("subdir1/subsubdir0", new PathWatchEventType[]{ADDED});
|
||||
|
||||
capture.finishedLatch.await(LONG_TIME, TimeUnit.MILLISECONDS);
|
||||
capture.assertEvents(expected);
|
||||
Thread.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
|
||||
// Check delete file
|
||||
capture.reset(2);
|
||||
expected.clear();
|
||||
Files.delete(dir.resolve("file1"));
|
||||
expected.put("file1",new PathWatchEventType[] { DELETED });
|
||||
Files.delete(dir.resolve("file2"));
|
||||
expected.put("file2",new PathWatchEventType[] { DELETED });
|
||||
|
||||
capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS);
|
||||
capture.assertEvents(expected);
|
||||
Thread.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
|
||||
}
|
||||
finally
|
||||
{
|
||||
pathWatcher.stop();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -368,7 +456,7 @@ public class PathWatcherTest
|
|||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setNotifyExistingOnStart(true);
|
||||
pathWatcher.setUpdateQuietTime(500,TimeUnit.MILLISECONDS);
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
|
@ -393,7 +481,8 @@ public class PathWatcherTest
|
|||
expected.put("a.txt",new PathWatchEventType[] {ADDED});
|
||||
expected.put("b.txt",new PathWatchEventType[] {ADDED});
|
||||
|
||||
|
||||
Thread.currentThread().sleep(1000); // TODO poor test
|
||||
|
||||
capture.assertEvents(expected);
|
||||
|
||||
//stop it
|
||||
|
@ -401,7 +490,7 @@ public class PathWatcherTest
|
|||
|
||||
capture.reset();
|
||||
|
||||
Thread.currentThread().sleep(1000);
|
||||
Thread.currentThread().sleep(1000); // TODO poor test
|
||||
|
||||
pathWatcher.start();
|
||||
|
||||
|
@ -436,13 +525,17 @@ public class PathWatcherTest
|
|||
|
||||
// Files we don't care about
|
||||
Files.createFile(dir.resolve("foo.war.backup"));
|
||||
Files.createFile(dir.resolve(".hidden.war"));
|
||||
|
||||
String hidden_war = OS.IS_WINDOWS ? "hidden.war" : ".hidden.war";
|
||||
Files.createFile(dir.resolve(hidden_war));
|
||||
if (OS.IS_WINDOWS)
|
||||
Files.setAttribute(dir.resolve(hidden_war),"dos:hidden",Boolean.TRUE);
|
||||
Files.createDirectories(dir.resolve(".wat/WEB-INF"));
|
||||
Files.createFile(dir.resolve(".wat/huh.war"));
|
||||
Files.createFile(dir.resolve(".wat/WEB-INF/web.xml"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(300,TimeUnit.MILLISECONDS);
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
|
@ -458,17 +551,19 @@ public class PathWatcherTest
|
|||
|
||||
try
|
||||
{
|
||||
capture.setFinishTrigger(2);
|
||||
pathWatcher.start();
|
||||
|
||||
// Let quiet time do its thing
|
||||
awaitQuietTime(pathWatcher);
|
||||
capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
expected.put("bar/WEB-INF/web.xml",new PathWatchEventType[] { ADDED });
|
||||
expected.put("foo.war",new PathWatchEventType[] { ADDED });
|
||||
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
finally
|
||||
{
|
||||
|
@ -494,11 +589,10 @@ public class PathWatcherTest
|
|||
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(300,TimeUnit.MILLISECONDS);
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
capture.setFinishTrigger(3);
|
||||
pathWatcher.addListener(capture);
|
||||
|
||||
// Add test dir configuration
|
||||
|
@ -510,10 +604,9 @@ public class PathWatcherTest
|
|||
|
||||
try
|
||||
{
|
||||
capture.setFinishTrigger(3);
|
||||
pathWatcher.start();
|
||||
|
||||
// Let quiet time do its thing
|
||||
awaitQuietTime(pathWatcher);
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
|
@ -521,6 +614,8 @@ public class PathWatcherTest
|
|||
expected.put("b/b.txt",new PathWatchEventType[] { ADDED });
|
||||
expected.put("c/d/d.txt",new PathWatchEventType[] { ADDED });
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
finally
|
||||
{
|
||||
|
@ -539,16 +634,15 @@ public class PathWatcherTest
|
|||
Files.createFile(dir.resolve("bar/WEB-INF/web.xml"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(300,TimeUnit.MILLISECONDS);
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
capture.setFinishTrigger(5);
|
||||
pathWatcher.addListener(capture);
|
||||
|
||||
// Add test dir configuration
|
||||
PathWatcher.Config baseDirConfig = new PathWatcher.Config(dir);
|
||||
baseDirConfig.setRecurseDepth(2);
|
||||
baseDirConfig.setRecurseDepth(100);
|
||||
baseDirConfig.addExcludeHidden();
|
||||
baseDirConfig.addIncludeGlobRelative("*.war");
|
||||
baseDirConfig.addIncludeGlobRelative("*/WEB-INF/web.xml");
|
||||
|
@ -556,11 +650,13 @@ public class PathWatcherTest
|
|||
|
||||
try
|
||||
{
|
||||
capture.setFinishTrigger(2);
|
||||
pathWatcher.start();
|
||||
|
||||
// Pretend that startup occurred
|
||||
awaitQuietTime(pathWatcher);
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
capture.setFinishTrigger(3);
|
||||
|
||||
// Update web.xml
|
||||
Path webFile = dir.resolve("bar/WEB-INF/web.xml");
|
||||
//capture.setFinishTrigger(webFile,MODIFIED);
|
||||
|
@ -573,7 +669,7 @@ public class PathWatcherTest
|
|||
Files.createFile(dir.resolve("bar.war"));
|
||||
|
||||
// Let capture complete
|
||||
capture.awaitFinish(pathWatcher);
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
|
@ -582,6 +678,8 @@ public class PathWatcherTest
|
|||
expected.put("bar.war",new PathWatchEventType[] { ADDED });
|
||||
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
finally
|
||||
{
|
||||
|
@ -600,7 +698,7 @@ public class PathWatcherTest
|
|||
Files.createFile(dir.resolve("bar/WEB-INF/web.xml"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(300,TimeUnit.MILLISECONDS);
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
|
@ -616,26 +714,36 @@ public class PathWatcherTest
|
|||
|
||||
try
|
||||
{
|
||||
capture.setFinishTrigger(2);
|
||||
pathWatcher.start();
|
||||
|
||||
// Pretend that startup occurred
|
||||
awaitQuietTime(pathWatcher);
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
// New war added
|
||||
capture.setFinishTrigger(1);
|
||||
Path warFile = dir.resolve("hello.war");
|
||||
capture.setFinishTrigger(warFile,MODIFIED);
|
||||
updateFile(warFile,"Hello Update");
|
||||
updateFile(warFile,"Create Hello");
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
updateFile(warFile,"Hello 1");
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
updateFile(warFile,"Hello two");
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
updateFile(warFile,"Hello three");
|
||||
|
||||
// Let capture finish
|
||||
capture.awaitFinish(pathWatcher);
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
expected.put("bar/WEB-INF/web.xml",new PathWatchEventType[] { ADDED });
|
||||
expected.put("foo.war",new PathWatchEventType[] { ADDED });
|
||||
expected.put("hello.war",new PathWatchEventType[] { ADDED, MODIFIED });
|
||||
expected.put("hello.war",new PathWatchEventType[] { ADDED });
|
||||
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
Assume.assumeFalse(OS.IS_OSX); // TODO fix this
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
finally
|
||||
{
|
||||
|
@ -643,8 +751,195 @@ public class PathWatcherTest
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDeployFiles_NewDir() throws Exception
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
|
||||
// Files we are interested in
|
||||
Files.createFile(dir.resolve("foo.war"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
pathWatcher.addListener(capture);
|
||||
|
||||
// Add test dir configuration
|
||||
PathWatcher.Config baseDirConfig = new PathWatcher.Config(dir);
|
||||
baseDirConfig.setRecurseDepth(2);
|
||||
baseDirConfig.addExcludeHidden();
|
||||
baseDirConfig.addIncludeGlobRelative("*.war");
|
||||
baseDirConfig.addIncludeGlobRelative("*/WEB-INF/web.xml");
|
||||
pathWatcher.watch(baseDirConfig);
|
||||
|
||||
try
|
||||
{
|
||||
capture.setFinishTrigger(1);
|
||||
pathWatcher.start();
|
||||
|
||||
// Pretend that startup occurred
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
// New war added
|
||||
capture.setFinishTrigger(1);
|
||||
|
||||
Files.createDirectories(dir.resolve("bar/WEB-INF"));
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
Files.createFile(dir.resolve("bar/WEB-INF/web.xml"));
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
updateFile(dir.resolve("bar/WEB-INF/web.xml"),"Update");
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
updateFile(dir.resolve("bar/WEB-INF/web.xml"),"Update web.xml");
|
||||
|
||||
// Let capture finish
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
expected.put("bar/WEB-INF/web.xml",new PathWatchEventType[] { ADDED });
|
||||
expected.put("foo.war",new PathWatchEventType[] { ADDED });
|
||||
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
finally
|
||||
{
|
||||
pathWatcher.stop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testDeployFilesBeyondDepthLimit() throws Exception
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
|
||||
// Files we are interested in
|
||||
Files.createDirectories(dir.resolve("foo/WEB-INF/lib"));
|
||||
Files.createDirectories(dir.resolve("bar/WEB-INF/lib"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
pathWatcher.addListener(capture);
|
||||
|
||||
// Add test dir configuration
|
||||
PathWatcher.Config baseDirConfig = new PathWatcher.Config(dir);
|
||||
baseDirConfig.setRecurseDepth(0);
|
||||
pathWatcher.watch(baseDirConfig);
|
||||
|
||||
try
|
||||
{
|
||||
capture.setFinishTrigger(2);
|
||||
pathWatcher.start();
|
||||
|
||||
// Pretend that startup occurred
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
expected.put("foo",new PathWatchEventType[] { ADDED });
|
||||
expected.put("bar",new PathWatchEventType[] { ADDED });
|
||||
|
||||
capture.assertEvents(expected);
|
||||
|
||||
capture.reset(1);
|
||||
expected.clear();
|
||||
expected.put("bar",new PathWatchEventType[] { MODIFIED });
|
||||
Files.createFile(dir.resolve("bar/index.html"));
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
|
||||
capture.reset(1);
|
||||
expected.clear();
|
||||
expected.put("bob",new PathWatchEventType[] { ADDED });
|
||||
Files.createFile(dir.resolve("bar/WEB-INF/lib/ignored"));
|
||||
PathWatcher.LOG.debug("create bob");
|
||||
Files.createDirectories(dir.resolve("bob/WEB-INF/lib"));
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
PathWatcher.LOG.debug("create bob/index.html");
|
||||
Files.createFile(dir.resolve("bob/index.html"));
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
PathWatcher.LOG.debug("update bob/index.html");
|
||||
updateFile(dir.resolve("bob/index.html"),"Update");
|
||||
Thread.sleep(QUIET_TIME/2);
|
||||
PathWatcher.LOG.debug("update bob/index.html");
|
||||
updateFile(dir.resolve("bob/index.html"),"Update index.html");
|
||||
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
|
||||
}
|
||||
finally
|
||||
{
|
||||
pathWatcher.stop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testWatchFile() throws Exception
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
|
||||
// Files we are interested in
|
||||
Files.createDirectories(dir.resolve("bar/WEB-INF"));
|
||||
Files.createFile(dir.resolve("bar/WEB-INF/web.xml"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
pathWatcher.addListener(capture);
|
||||
|
||||
// Add test configuration
|
||||
pathWatcher.watch(dir.resolve("bar/WEB-INF/web.xml"));
|
||||
pathWatcher.setNotifyExistingOnStart(false);
|
||||
|
||||
try
|
||||
{
|
||||
pathWatcher.start();
|
||||
Thread.sleep(WAIT_TIME);
|
||||
assertThat(capture.events.size(),is(0));
|
||||
|
||||
Files.createFile(dir.resolve("bar/index.htnl"));
|
||||
Files.createFile(dir.resolve("bar/WEB-INF/other.xml"));
|
||||
Files.createDirectories(dir.resolve("bar/WEB-INF/lib"));
|
||||
|
||||
Thread.sleep(WAIT_TIME);
|
||||
assertThat(capture.events.size(),is(0));
|
||||
|
||||
capture.setFinishTrigger(1);
|
||||
updateFile(dir.resolve("bar/WEB-INF/web.xml"),"Update web.xml");
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
expected.put("bar/WEB-INF/web.xml",new PathWatchEventType[] { MODIFIED });
|
||||
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
finally
|
||||
{
|
||||
pathWatcher.stop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Pretend to add a new war file that is large, and being copied into place
|
||||
* Pretend to modify a new war file that is large, and being copied into place
|
||||
* using some sort of technique that is slow enough that it takes a while for
|
||||
* the entire war file to exist in place.
|
||||
* <p>
|
||||
|
@ -654,17 +949,18 @@ public class PathWatcherTest
|
|||
* on test failure
|
||||
*/
|
||||
@Test
|
||||
public void testDeployFiles_NewWar_LargeSlowCopy() throws Exception
|
||||
public void testDeployFiles_ModifyWar_LargeSlowCopy() throws Exception
|
||||
{
|
||||
Path dir = testdir.getEmptyPathDir();
|
||||
|
||||
// Files we are interested in
|
||||
Files.createFile(dir.resolve("foo.war"));
|
||||
Files.createFile(dir.resolve("hello.war"));
|
||||
Files.createDirectories(dir.resolve("bar/WEB-INF"));
|
||||
Files.createFile(dir.resolve("bar/WEB-INF/web.xml"));
|
||||
|
||||
PathWatcher pathWatcher = new PathWatcher();
|
||||
pathWatcher.setUpdateQuietTime(500,TimeUnit.MILLISECONDS);
|
||||
pathWatcher.setUpdateQuietTime(QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
|
||||
// Add listener
|
||||
PathWatchEventCapture capture = new PathWatchEventCapture(dir);
|
||||
|
@ -680,26 +976,35 @@ public class PathWatcherTest
|
|||
|
||||
try
|
||||
{
|
||||
capture.setFinishTrigger(3);
|
||||
pathWatcher.start();
|
||||
|
||||
// Pretend that startup occurred
|
||||
awaitQuietTime(pathWatcher);
|
||||
assertTrue(capture.finishedLatch.await(LONG_TIME,TimeUnit.MILLISECONDS));
|
||||
|
||||
|
||||
// New war added (slowly)
|
||||
capture.setFinishTrigger(1);
|
||||
Path warFile = dir.resolve("hello.war");
|
||||
capture.setFinishTrigger(warFile,MODIFIED);
|
||||
updateFileOverTime(warFile,50 * MB,3,TimeUnit.SECONDS);
|
||||
|
||||
// Let capture finish
|
||||
capture.awaitFinish(pathWatcher);
|
||||
long start = System.nanoTime();
|
||||
new Thread(()->
|
||||
{
|
||||
updateFileOverTime(warFile,2*QUIET_TIME,TimeUnit.MILLISECONDS);
|
||||
}).start();
|
||||
|
||||
assertTrue(capture.finishedLatch.await(4*QUIET_TIME,TimeUnit.MILLISECONDS));
|
||||
long end = System.nanoTime();
|
||||
assertThat(end-start,greaterThan(TimeUnit.MILLISECONDS.toNanos(2*QUIET_TIME)));
|
||||
|
||||
|
||||
Map<String, PathWatchEventType[]> expected = new HashMap<>();
|
||||
|
||||
expected.put("bar/WEB-INF/web.xml",new PathWatchEventType[] { ADDED });
|
||||
expected.put("foo.war",new PathWatchEventType[] { ADDED });
|
||||
expected.put("hello.war",new PathWatchEventType[] { ADDED, MODIFIED });
|
||||
|
||||
capture.assertEvents(expected);
|
||||
TimeUnit.MILLISECONDS.sleep(WAIT_TIME);
|
||||
capture.assertEvents(expected);
|
||||
}
|
||||
finally
|
||||
{
|
||||
|
|
|
@@ -32,13 +32,11 @@ import java.security.KeyStore;

import javax.net.ssl.SSLEngine;

import org.eclipse.jetty.toolchain.test.JDK;
import org.eclipse.jetty.util.component.AbstractLifeCycle;
import org.eclipse.jetty.util.log.StacklessLogging;
import org.eclipse.jetty.util.resource.Resource;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
@ -225,8 +223,7 @@ public class SslContextFactoryTest
|
|||
public void testSetIncludeCipherSuitesRegex() throws Exception
|
||||
{
|
||||
cf.setIncludeCipherSuites(".*ECDHE.*",".*WIBBLE.*");
|
||||
Assume.assumeFalse(JDK.IS_8);
|
||||
|
||||
|
||||
cf.start();
|
||||
SSLEngine sslEngine = cf.newSSLEngine();
|
||||
String[] enabledCipherSuites = sslEngine.getEnabledCipherSuites();
|
||||
|
|
|
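For context, the regex-based include list exercised by this test can be applied to any SslContextFactory before it is started; a minimal sketch under that assumption (the keystore path and password are placeholders, not values from this commit):

import javax.net.ssl.SSLEngine;
import org.eclipse.jetty.util.ssl.SslContextFactory;

public class IncludeCipherSuitesExample
{
    public static void main(String[] args) throws Exception
    {
        SslContextFactory ssl = new SslContextFactory();
        ssl.setKeyStorePath("/path/to/keystore");   // placeholder
        ssl.setKeyStorePassword("changeit");        // placeholder
        ssl.setIncludeCipherSuites(".*ECDHE.*");    // entries are treated as regular expressions
        ssl.start();

        SSLEngine engine = ssl.newSSLEngine();
        for (String suite : engine.getEnabledCipherSuites())
            System.out.println(suite);

        ssl.stop();
    }
}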
@@ -25,6 +25,7 @@ import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

@@ -33,163 +34,138 @@ import static org.junit.Assert.assertThat;

public class ReservedThreadExecutorTest
{
final static int SIZE = 2;
TestExecutor _executor;
ReservedThreadExecutor _pae;

private static final int SIZE = 2;
private static final Runnable NOOP = () -> {};

private TestExecutor _executor;
private ReservedThreadExecutor _reservedExecutor;

@Before
public void before() throws Exception
{
_executor = new TestExecutor();
_pae = new ReservedThreadExecutor(_executor,SIZE);
_pae.start();
_reservedExecutor = new ReservedThreadExecutor(_executor, SIZE);
_reservedExecutor.start();
}

@After
public void after() throws Exception
{
_pae.stop();
_reservedExecutor.stop();
}

@Test
public void testStarted() throws Exception
{
assertThat(_executor._queue.size(),is(SIZE));
while(!_executor._queue.isEmpty())
_executor.execute();

assertThat(_pae.getCapacity(),is(SIZE));

long started = System.nanoTime();
while (_pae.getPreallocated()<SIZE)
{
if (TimeUnit.NANOSECONDS.toSeconds(System.nanoTime()-started)>10)
break;
Thread.sleep(100);
}
assertThat(_pae.getPreallocated(),is(SIZE));
// Reserved threads are lazily started.
assertThat(_executor._queue.size(), is(0));
}

@Test
public void testPending() throws Exception
{
assertThat(_executor._queue.size(),is(SIZE));
assertThat(_pae.tryExecute(new NOOP()),is(false));
assertThat(_executor._queue.size(),is(SIZE));

_executor.execute();
assertThat(_executor._queue.size(),is(SIZE-1));
while (!_executor._queue.isEmpty())
assertThat(_executor._queue.size(), is(0));

for (int i = 0; i < SIZE; i++)
_reservedExecutor.tryExecute(NOOP);
assertThat(_executor._queue.size(), is(SIZE));

for (int i = 0; i < SIZE; i++)
_executor.execute();
assertThat(_executor._queue.size(), is(0));

long started = System.nanoTime();
while (_pae.getPreallocated()<SIZE)
{
if (TimeUnit.NANOSECONDS.toSeconds(System.nanoTime()-started)>10)
break;
Thread.sleep(100);
}
assertThat(_executor._queue.size(),is(0));
assertThat(_pae.getPreallocated(),is(SIZE));

for (int i=SIZE;i-->0;)
assertThat(_pae.tryExecute(new Task()),is(true));
assertThat(_executor._queue.size(),is(1));
assertThat(_pae.getPreallocated(),is(0));
waitForAllAvailable();

for (int i=SIZE;i-->0;)
assertThat(_pae.tryExecute(new NOOP()),is(false));
assertThat(_executor._queue.size(),is(SIZE));
assertThat(_pae.getPreallocated(),is(0));

assertThat(_pae.tryExecute(new NOOP()),is(false));
assertThat(_executor._queue.size(),is(SIZE));
assertThat(_pae.getPreallocated(),is(0));
for (int i = 0; i < SIZE; i++)
assertThat(_reservedExecutor.tryExecute(new Task()), is(true));
assertThat(_executor._queue.size(), is(1));
assertThat(_reservedExecutor.getAvailable(), is(0));

for (int i = 0; i < SIZE; i++)
assertThat(_reservedExecutor.tryExecute(NOOP), is(false));
assertThat(_executor._queue.size(), is(SIZE));
assertThat(_reservedExecutor.getAvailable(), is(0));
}

@Test
public void testExecuted() throws Exception
{
while(!_executor._queue.isEmpty())
assertThat(_executor._queue.size(), is(0));

for (int i = 0; i < SIZE; i++)
_reservedExecutor.tryExecute(NOOP);
assertThat(_executor._queue.size(), is(SIZE));

for (int i = 0; i < SIZE; i++)
_executor.execute();
long started = System.nanoTime();
while (_pae.getPreallocated()<SIZE)
assertThat(_executor._queue.size(), is(0));

waitForAllAvailable();

Task[] tasks = new Task[SIZE];
for (int i = 0; i < SIZE; i++)
{
if (TimeUnit.NANOSECONDS.toSeconds(System.nanoTime()-started)>10)
break;
Thread.sleep(100);
}
assertThat(_pae.getPreallocated(),is(SIZE));

Task[] task = new Task[SIZE];
for (int i=SIZE;i-->0;)
{
task[i] = new Task();
assertThat(_pae.tryExecute(task[i]),is(true));
tasks[i] = new Task();
assertThat(_reservedExecutor.tryExecute(tasks[i]), is(true));
}

for (int i=SIZE;i-->0;)
{
task[i]._ran.await(10,TimeUnit.SECONDS);
}
for (int i = 0; i < SIZE; i++)
tasks[i]._ran.await(10, TimeUnit.SECONDS);

assertThat(_executor._queue.size(), is(1));

assertThat(_executor._queue.size(),is(1));
Task extra = new Task();
assertThat(_pae.tryExecute(extra),is(false));
assertThat(_executor._queue.size(),is(2));
Thread.sleep(100);
assertThat(extra._ran.getCount(),is(1L));
assertThat(_reservedExecutor.tryExecute(extra), is(false));
assertThat(_executor._queue.size(), is(2));

for (int i=SIZE;i-->0;)
{
task[i]._complete.countDown();
}

started = System.nanoTime();
while (_pae.getPreallocated()<SIZE)
{
if (TimeUnit.NANOSECONDS.toSeconds(System.nanoTime()-started)>10)
break;
Thread.sleep(100);
}
assertThat(_pae.getPreallocated(),is(SIZE));

Thread.sleep(500);
assertThat(extra._ran.getCount(), is(1L));

for (int i = 0; i < SIZE; i++)
tasks[i]._complete.countDown();

waitForAllAvailable();
}

protected void waitForAllAvailable() throws InterruptedException
{
long started = System.nanoTime();
while (_reservedExecutor.getAvailable() < SIZE)
{
long elapsed = System.nanoTime() - started;
if (elapsed > TimeUnit.SECONDS.toNanos(10))
Assert.fail();
Thread.sleep(10);
}
assertThat(_reservedExecutor.getAvailable(), is(SIZE));
}

private static class TestExecutor implements Executor
{
Deque<Runnable> _queue = new ArrayDeque<>();
private final Deque<Runnable> _queue = new ArrayDeque<>();

@Override
public void execute(Runnable task)
{
_queue.addLast(task);
}

public void execute()
{
Runnable task = _queue.pollFirst();
if (task!=null)
if (task != null)
new Thread(task).start();
}
}

private static class NOOP implements Runnable
{
@Override
public void run() {}
}

private static class Task implements Runnable
{
private CountDownLatch _ran = new CountDownLatch(1);
private CountDownLatch _complete = new CountDownLatch(1);

@Override
public void run()
{
public void run()
{
_ran.countDown();
try
{
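The rewritten test exercises the ReservedThreadExecutor contract: a fixed number of threads is reserved from a wrapping Executor, and tryExecute(...) only succeeds while one of them is available. A minimal usage sketch under those assumptions (the backing pool size is illustrative):

import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ReservedThreadExecutor;

public class ReservedThreadExecutorExample
{
    public static void main(String[] args) throws Exception
    {
        QueuedThreadPool pool = new QueuedThreadPool(8);   // backing executor
        pool.start();

        // Reserve up to 2 threads taken from the pool above.
        ReservedThreadExecutor reserved = new ReservedThreadExecutor(pool, 2);
        reserved.start();

        // Reserved threads are started lazily, so tryExecute may return false until
        // a thread has actually been reserved; callers need a fallback execution path.
        boolean accepted = reserved.tryExecute(() -> System.out.println("ran on a reserved thread"));
        System.out.println("accepted = " + accepted);

        reserved.stop();
        pool.stop();
    }
}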
@@ -1,3 +1,4 @@
# Setup default logging implementation for during testing
org.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.StdErrLog
#org.eclipse.jetty.util.LEVEL=DEBUG
#org.eclipse.jetty.util.PathWatcher.LEVEL=DEBUG
@@ -1429,6 +1429,12 @@ public class StandardDescriptorProcessor extends IterativeDescriptorProcessor

public void visitSecurityConstraint(WebAppContext context, Descriptor descriptor, XmlParser.Node node)
{
if (context.getSecurityHandler() == null)
{
LOG.warn("security-constraint declared but SecurityHandler==null");
return;
}

Constraint scBase = new Constraint();

//ServletSpec 3.0, p74 security-constraints, as minOccurs > 1, are additive

@@ -1702,6 +1708,11 @@ public class StandardDescriptorProcessor extends IterativeDescriptorProcessor

public void visitSecurityRole(WebAppContext context, Descriptor descriptor, XmlParser.Node node)
{
if (context.getSecurityHandler() == null)
{
LOG.warn("security-role declared but SecurityHandler==null");
return;
}
//ServletSpec 3.0, p74 elements with multiplicity >1 are additive when merged
XmlParser.Node roleNode = node.get("role-name");
String role = roleNode.toString(false, true);

@@ -1939,6 +1950,12 @@ public class StandardDescriptorProcessor extends IterativeDescriptorProcessor
*/
public void visitDenyUncoveredHttpMethods(WebAppContext context, Descriptor descriptor, XmlParser.Node node)
{
if (context.getSecurityHandler() == null)
{
LOG.warn("deny-uncovered-http-methods declared but SecurityHandler==null");
return;
}

((ConstraintAware)context.getSecurityHandler()).setDenyUncoveredHttpMethods(true);
}
@@ -42,6 +42,7 @@ import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.ContextHandlerCollection;
import org.eclipse.jetty.server.handler.HandlerList;
import org.eclipse.jetty.server.handler.HotSwapHandler;
import org.eclipse.jetty.servlet.ErrorPageErrorHandler;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.resource.Resource;

@@ -226,6 +227,34 @@ public class WebAppContextTest
}
}

@Test
public void testNullSessionAndSecurityHandler() throws Exception
{
Server server = new Server(0);
HandlerList handlers = new HandlerList();
ContextHandlerCollection contexts = new ContextHandlerCollection();
WebAppContext context = new WebAppContext(null, null, null, null, null, new ErrorPageErrorHandler(),
ServletContextHandler.NO_SESSIONS|ServletContextHandler.NO_SECURITY);
context.setContextPath("/");
context.setBaseResource(Resource.newResource("./src/test/webapp"));
server.setHandler(handlers);
handlers.addHandler(contexts);
contexts.addHandler(context);

LocalConnector connector = new LocalConnector(server);
server.addConnector(connector);

try
{
server.start();
Assert.assertTrue(context.isAvailable());
}
finally
{
server.stop();
}
}

class ServletA extends GenericServlet
{
@@ -97,10 +97,6 @@ public class DelayedStartClientTest
QueuedThreadPool qtp = (QueuedThreadPool) executor;
threadNames.add(qtp.getName());
}
else
{
System.err.println("### Executor: " + executor);
}
}

for (ContainerLifeCycle child : container.getBeans(ContainerLifeCycle.class))
@@ -52,7 +52,6 @@ public class TimeDecoder implements Decoder.Text<Date>
@Override
public void init(EndpointConfig config)
{
System.out.println("#### INIT ####");
}

@Override
@@ -27,12 +27,13 @@ import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.toolchain.test.TestingDir;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.client.WebSocketClient;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.eclipse.jetty.websocket.jsr356.server.samples.echo.BasicEchoSocket;
import org.junit.Assert;
import org.junit.Rule;

@@ -47,8 +48,7 @@ public class AltFilterTest
@Rule
public TestingDir testdir = new TestingDir();

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

@Test
public void testEcho() throws Exception
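This and the following tests swap the LeakTrackingBufferPoolRule for a plain MappedByteBufferPool. The only part of the ByteBufferPool contract they rely on is acquire/release; a small sketch of that contract (the buffer size is illustrative):

import java.nio.ByteBuffer;
import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;

public class BufferPoolExample
{
    public static void main(String[] args)
    {
        ByteBufferPool pool = new MappedByteBufferPool();

        // Acquire a heap buffer of at least 4 KiB (pass true for a direct buffer).
        ByteBuffer buffer = pool.acquire(4096, false);
        System.out.println("capacity = " + buffer.capacity());

        // Return the buffer so later acquire() calls can reuse it.
        pool.release(buffer);
    }
}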
@@ -26,12 +26,13 @@ import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.toolchain.test.MavenTestingUtils;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.client.ClientUpgradeRequest;
import org.eclipse.jetty.websocket.client.WebSocketClient;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.eclipse.jetty.websocket.jsr356.server.samples.beans.DateDecoder;
import org.eclipse.jetty.websocket.jsr356.server.samples.beans.TimeEncoder;
import org.eclipse.jetty.websocket.jsr356.server.samples.echo.ConfiguredEchoSocket;

@@ -47,8 +48,7 @@ import org.junit.Test;
*/
public class AnnotatedServerEndpointTest
{
@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

private static WSServer server;
@@ -23,11 +23,12 @@ import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.toolchain.test.TestingDir;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.client.WebSocketClient;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.eclipse.jetty.websocket.jsr356.server.samples.echo.BasicEchoEndpoint;
import org.eclipse.jetty.websocket.jsr356.server.samples.echo.BasicEchoEndpointConfigContextListener;
import org.junit.Assert;

@@ -43,8 +44,7 @@ public class BasicEndpointTest
@Rule
public TestingDir testdir = new TestingDir();

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

@Test
public void testEcho() throws Exception
@@ -380,10 +380,6 @@ public class DelayedStartClientOnServerTest
QueuedThreadPool qtp = (QueuedThreadPool) executor;
threadNames.add(qtp.getName());
}
else
{
System.err.println("### Executor: " + executor);
}
}

for (ContainerLifeCycle child : container.getBeans(ContainerLifeCycle.class))
@@ -25,11 +25,12 @@ import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.toolchain.test.TestingDir;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.client.WebSocketClient;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.eclipse.jetty.websocket.jsr356.server.samples.echo.LargeEchoConfiguredSocket;
import org.junit.Assert;
import org.junit.Ignore;

@@ -45,8 +46,7 @@ public class LargeAnnotatedTest
@Rule
public TestingDir testdir = new TestingDir();

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

@Test
public void testEcho() throws Exception
@@ -25,11 +25,12 @@ import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.toolchain.test.TestingDir;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.client.WebSocketClient;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.eclipse.jetty.websocket.jsr356.server.samples.echo.LargeEchoDefaultSocket;
import org.junit.Assert;
import org.junit.Ignore;

@@ -45,8 +46,7 @@ public class LargeContainerTest
@Rule
public TestingDir testdir = new TestingDir();

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

@Test
public void testEcho() throws Exception
@@ -23,11 +23,12 @@ import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.toolchain.test.TestingDir;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.client.WebSocketClient;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.eclipse.jetty.websocket.jsr356.server.samples.echo.EchoReturnEndpoint;
import org.junit.Assert;
import org.junit.Rule;

@@ -38,8 +39,7 @@ public class OnMessageReturnTest
@Rule
public TestingDir testdir = new TestingDir();

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

@Test
public void testEchoReturn() throws Exception
@@ -29,16 +29,16 @@ import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.servlet.DefaultServlet;
import org.eclipse.jetty.toolchain.test.MavenTestingUtils;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.client.WebSocketClient;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@@ -47,9 +47,9 @@ import org.junit.runners.Parameterized.Parameters;
@RunWith(Parameterized.class)
public class SessionTest
{
private static interface Case
private interface Case
{
public void customize(WebAppContext context);
void customize(WebAppContext context);
}

@Parameters

@@ -98,8 +98,7 @@ public class SessionTest
return cases;
}

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

private final Case testcase;
private final static AtomicInteger ID = new AtomicInteger(0);
@@ -50,6 +50,8 @@ import javax.websocket.server.PathParam;
import javax.websocket.server.ServerEndpoint;
import javax.websocket.server.ServerEndpointConfig;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;

@@ -58,7 +60,6 @@ import org.eclipse.jetty.toolchain.test.IO;
import org.eclipse.jetty.toolchain.test.MavenTestingUtils;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.eclipse.jetty.websocket.common.util.Sha1Sum;
import org.eclipse.jetty.websocket.jsr356.server.deploy.WebSocketServerContainerInitializer;
import org.junit.AfterClass;

@@ -71,8 +72,7 @@ public class StreamTest
{
private static final Logger LOG = Log.getLogger(StreamTest.class);

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

private static File outputDir;
private static Server server;
@@ -205,9 +205,15 @@ public class WebSocketPolicy
}

/**
* Get the maximum size of a binary message during parsing/generating.
* Get the maximum size of a binary message during parsing.
* <p>
* Binary messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* This is a memory conservation option, memory over this limit will not be
* allocated by Jetty for handling binary messages. This applies to individual frames,
* whole message handling, and partial message handling.
* </p>
* <p>
* Binary messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* </p>
*
* @return the maximum size of a binary message
*/

@@ -227,9 +233,15 @@ public class WebSocketPolicy
}

/**
* Get the maximum size of a text message during parsing/generating.
* Get the maximum size of a text message during parsing.
* <p>
* Text messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* This is a memory conservation option, memory over this limit will not be
* allocated by Jetty for handling text messages. This applies to individual frames,
* whole message handling, and partial message handling.
* </p>
* <p>
* Text messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* </p>
*
* @return the maximum size of a text message.
*/

@@ -279,7 +291,7 @@ public class WebSocketPolicy
/**
* The maximum size of a binary message buffer.
* <p>
* Used ONLY for stream based message writing.
* Used ONLY for stream based binary message writing.
*
* @param size
* the maximum size of the binary message buffer

@@ -292,9 +304,15 @@ public class WebSocketPolicy
}

/**
* The maximum size of a binary message during parsing/generating.
* The maximum size of a binary message during parsing.
* <p>
* Binary messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* This is a memory conservation option, memory over this limit will not be
* allocated by Jetty for handling binary messages. This applies to individual frames,
* whole message handling, and partial message handling.
* </p>
* <p>
* Binary messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* </p>
*
* @param size
* the maximum allowed size of a binary message.

@@ -309,7 +327,7 @@ public class WebSocketPolicy
/**
* The maximum size of a text message buffer.
* <p>
* Used ONLY for stream based message writing.
* Used ONLY for stream based text message writing.
*
* @param size
* the maximum size of the text message buffer

@@ -322,9 +340,15 @@ public class WebSocketPolicy
}

/**
* The maximum size of a text message during parsing/generating.
* The maximum size of a text message during parsing.
* <p>
* Text messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* This is a memory conservation option, memory over this limit will not be
* allocated by Jetty for handling text messages. This applies to individual frames,
* whole message handling, and partial message handling.
* </p>
* <p>
* Text messages over this maximum will result in a close code 1009 {@link StatusCode#MESSAGE_TOO_LARGE}
* </p>
*
* @param size
* the maximum allowed size of a text message.
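The reworded javadoc above separates the parse-time message size limits (enforced with close code 1009) from the buffer sizes used only for stream-based writing. A minimal sketch of configuring both on a policy; the sizes are illustrative, not defaults from this commit:

import org.eclipse.jetty.websocket.api.WebSocketPolicy;

public class PolicyExample
{
    public static void main(String[] args)
    {
        WebSocketPolicy policy = WebSocketPolicy.newServerPolicy();

        // Parse-time limits: larger messages are rejected with close code 1009 (MESSAGE_TOO_LARGE).
        policy.setMaxTextMessageSize(64 * 1024);
        policy.setMaxBinaryMessageSize(64 * 1024);

        // Buffer sizes used only for stream-based message writing.
        policy.setMaxTextMessageBufferSize(32 * 1024);
        policy.setMaxBinaryMessageBufferSize(32 * 1024);

        System.out.println(policy);
    }
}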
@@ -77,6 +77,9 @@ public class WebSocketClient extends ContainerLifeCycle implements WebSocketCont

private final int id = ThreadLocalRandom.current().nextInt();

// defaults to true for backwards compatibility
private boolean stopAtShutdown = true;

/**
* Instantiate a WebSocketClient with defaults
*/

@@ -552,7 +555,7 @@ public class WebSocketClient extends ContainerLifeCycle implements WebSocketCont

private synchronized void init() throws IOException
{
if (!ShutdownThread.isRegistered(this))
if (isStopAtShutdown() && !ShutdownThread.isRegistered(this))
{
ShutdownThread.register(this);
}

@@ -694,6 +697,31 @@ public class WebSocketClient extends ContainerLifeCycle implements WebSocketCont
return this.httpClient;
}

/**
* Set JVM shutdown behavior.
* @param stop If true, this client instance will be explicitly stopped when the
* JVM is shutdown. Otherwise the application is responsible for maintaining the WebSocketClient lifecycle.
* @see Runtime#addShutdownHook(Thread)
* @see ShutdownThread
*/
public synchronized void setStopAtShutdown(boolean stop)
{
if (stop)
{
if (!stopAtShutdown && isStarted() && !ShutdownThread.isRegistered(this))
ShutdownThread.register(this);
}
else
ShutdownThread.deregister(this);

stopAtShutdown = stop;
}

public boolean isStopAtShutdown()
{
return stopAtShutdown;
}

@Override
public String toString()
{
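The new stopAtShutdown flag lets an application skip the ShutdownThread registration and own the client lifecycle itself; a minimal sketch of opting out (the socket class and URI in the comment are placeholders, not part of this commit):

import org.eclipse.jetty.websocket.client.WebSocketClient;

public class ClientLifecycleExample
{
    public static void main(String[] args) throws Exception
    {
        WebSocketClient client = new WebSocketClient();
        client.setStopAtShutdown(false);   // do not register with Jetty's ShutdownThread
        client.start();
        try
        {
            // client.connect(new MyEchoSocket(), URI.create("ws://localhost:8080/echo"));
        }
        finally
        {
            client.stop();                 // the application now owns shutdown
        }
    }
}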
@@ -22,12 +22,13 @@ import java.net.URI;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.MappedByteBufferPool;
import org.eclipse.jetty.toolchain.test.TestTracker;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.StatusCode;
import org.eclipse.jetty.websocket.common.test.BlockheadServer;
import org.eclipse.jetty.websocket.common.test.IBlockheadServerConnection;
import org.eclipse.jetty.websocket.common.test.LeakTrackingBufferPoolRule;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;

@@ -41,8 +42,7 @@ public class BadNetworkTest
@Rule
public TestTracker tt = new TestTracker();

@Rule
public LeakTrackingBufferPoolRule bufferPool = new LeakTrackingBufferPoolRule("Test");
public ByteBufferPool bufferPool = new MappedByteBufferPool();

private BlockheadServer server;
private WebSocketClient client;
Some files were not shown because too many files have changed in this diff.