Merge remote-tracking branch 'origin/jetty-9.4.x' into jetty-10.0.x
commit ec9827bb35
@@ -27,13 +27,20 @@ public abstract class HttpChannel
protected static final Logger LOG = Log.getLogger(HttpChannel.class);

private final HttpDestination _destination;
private final TimeoutCompleteListener _totalTimeout;
private HttpExchange _exchange;

protected HttpChannel(HttpDestination destination)
{
this._destination = destination;
_destination = destination;
_totalTimeout = new TimeoutCompleteListener(destination.getHttpClient().getScheduler());
}

public void destroy()
{
_totalTimeout.destroy();
}

public HttpDestination getHttpDestination()
{
return _destination;

@@ -102,7 +109,23 @@ public abstract class HttpChannel
protected abstract HttpReceiver getHttpReceiver();

public abstract void send();
public void send()
{
HttpExchange exchange = getHttpExchange();
if (exchange != null)
{
HttpRequest request = exchange.getRequest();
long timeoutAt = request.getTimeoutAt();
if (timeoutAt != -1)
{
exchange.getResponseListeners().add(_totalTimeout);
_totalTimeout.schedule(request, timeoutAt);
}
send(exchange);
}
}

public abstract void send(HttpExchange exchange);

public abstract void release();
@@ -52,9 +52,11 @@ import org.eclipse.jetty.client.api.Request;
import org.eclipse.jetty.client.api.Response;
import org.eclipse.jetty.client.http.HttpClientTransportOverHTTP;
import org.eclipse.jetty.client.util.FormContentProvider;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.http.HttpField;
import org.eclipse.jetty.http.HttpHeader;
import org.eclipse.jetty.http.HttpMethod;
import org.eclipse.jetty.http.HttpParser;
import org.eclipse.jetty.http.HttpScheme;
import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.io.ClientConnectionFactory;

@@ -147,6 +149,7 @@ public class HttpClient extends ContainerLifeCycle
private boolean removeIdleDestinations = false;
private boolean connectBlocking = false;
private String name = getClass().getSimpleName() + "@" + Integer.toHexString(hashCode());
private HttpCompliance httpCompliance = HttpCompliance.RFC7230;

/**
* Creates a {@link HttpClient} instance that can perform requests to non-TLS destinations only

@@ -551,7 +554,7 @@ public class HttpClient extends ContainerLifeCycle
protected boolean removeDestination(HttpDestination destination)
{
removeBean(destination);
return destinations.remove(destination.getOrigin()) != null;
return destinations.remove(destination.getOrigin(), destination);
}

/**

@@ -972,6 +975,25 @@ public class HttpClient extends ContainerLifeCycle
{
}

/**
* Gets the http compliance mode for parsing http responses.
* The default http compliance level is {@link HttpCompliance#RFC7230}, which is the latest HTTP/1.1 specification.
*/
public HttpCompliance getHttpCompliance()
{
return httpCompliance;
}

/**
* Sets the http compliance mode for parsing http responses.
* This affects how leniently the {@link HttpParser} parses http responses and which http protocol level is supported.
* @param httpCompliance The compliance level which is used to actually parse http responses
*/
public void setHttpCompliance(HttpCompliance httpCompliance)
{
this.httpCompliance = httpCompliance;
}

/**
* @return whether request events must be strictly ordered
* @see #setStrictEventOrdering(boolean)
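For orientation, a minimal sketch of how the new compliance property might be used from application code, mirroring the test setup later in this commit (the URL is a placeholder and the target server is assumed to exist):

import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.api.ContentResponse;
import org.eclipse.jetty.http.HttpCompliance;

public class ClientComplianceExample
{
    public static void main(String[] args) throws Exception
    {
        HttpClient client = new HttpClient();
        // Parse responses with the lenient RFC2616 mode instead of the default RFC7230.
        client.setHttpCompliance(HttpCompliance.RFC2616_LEGACY);
        client.start();
        try
        {
            ContentResponse response = client.GET("http://localhost:8080/");
            System.out.println(response.getStatus());
        }
        finally
        {
            client.stop();
        }
    }
}

The compliance mode should be set before connections are created, because HttpReceiverOverHTTP (further down in this diff) reads it when constructing its HttpParser.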
@@ -68,12 +68,8 @@ public abstract class HttpConnection implements Connection
HttpRequest httpRequest = (HttpRequest)request;

ArrayList<Response.ResponseListener> listeners = new ArrayList<>(httpRequest.getResponseListeners());
if (httpRequest.getTimeout() > 0)
{
TimeoutCompleteListener timeoutListener = new TimeoutCompleteListener(httpRequest);
timeoutListener.schedule(getHttpClient().getScheduler());
listeners.add(timeoutListener);
}

httpRequest.sent();
if (listener != null)
listeners.add(listener);
@@ -26,6 +26,9 @@ import java.util.Collections;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;

import org.eclipse.jetty.client.api.Connection;
import org.eclipse.jetty.client.api.Destination;

@@ -34,6 +37,7 @@ import org.eclipse.jetty.client.api.Response;
import org.eclipse.jetty.http.HttpField;
import org.eclipse.jetty.http.HttpHeader;
import org.eclipse.jetty.io.ClientConnectionFactory;
import org.eclipse.jetty.io.CyclicTimeout;
import org.eclipse.jetty.util.BlockingArrayQueue;
import org.eclipse.jetty.util.Callback;
import org.eclipse.jetty.util.HostPort;

@@ -45,6 +49,7 @@ import org.eclipse.jetty.util.component.Dumpable;
import org.eclipse.jetty.util.component.DumpableCollection;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.thread.Scheduler;
import org.eclipse.jetty.util.thread.Sweeper;

@ManagedObject

@@ -60,6 +65,7 @@ public abstract class HttpDestination extends ContainerLifeCycle implements Dest
private final ProxyConfiguration.Proxy proxy;
private final ClientConnectionFactory connectionFactory;
private final HttpField hostField;
private final TimeoutTask timeout;
private ConnectionPool connectionPool;

public HttpDestination(HttpClient client, Origin origin)

@@ -71,6 +77,8 @@ public abstract class HttpDestination extends ContainerLifeCycle implements Dest

this.requestNotifier = new RequestNotifier(client);
this.responseNotifier = new ResponseNotifier();

this.timeout = new TimeoutTask(client.getScheduler());

ProxyConfiguration proxyConfig = client.getProxyConfiguration();
proxy = proxyConfig.match(origin);

@@ -228,7 +236,7 @@ public abstract class HttpDestination extends ContainerLifeCycle implements Dest
}

protected void send(HttpRequest request, List<Response.ResponseListener> listeners)
{
{
if (!getScheme().equalsIgnoreCase(request.getScheme()))
throw new IllegalArgumentException("Invalid request scheme " + request.getScheme() + " for destination " + this);
if (!getHost().equalsIgnoreCase(request.getHost()))

@@ -246,6 +254,10 @@ public abstract class HttpDestination extends ContainerLifeCycle implements Dest
{
if (enqueue(exchanges, exchange))
{
long expiresAt = request.getTimeoutAt();
if (expiresAt != -1)
timeout.schedule(expiresAt);

if (!client.isRunning() && exchanges.remove(exchange))
{
request.abort(new RejectedExecutionException(client + " is stopping"));

@@ -370,6 +382,7 @@ public abstract class HttpDestination extends ContainerLifeCycle implements Dest
if (LOG.isDebugEnabled())
LOG.debug("Closed {}", this);
connectionPool.close();
timeout.destroy();
}

public void release(Connection connection)

@@ -470,4 +483,59 @@ public abstract class HttpDestination extends ContainerLifeCycle implements Dest
exchanges.size(),
connectionPool);
}

// The TimeoutTask that expires when the next check of expiry is needed
private class TimeoutTask extends CyclicTimeout
{
private final AtomicLong nextTimeout = new AtomicLong(Long.MAX_VALUE);

private TimeoutTask(Scheduler scheduler)
{
super(scheduler);
}

@Override
public void onTimeoutExpired()
{
nextTimeout.set(Long.MAX_VALUE);
long now = System.nanoTime();
long nextExpiresAt = Long.MAX_VALUE;

// Check all queued exchanges for those that have expired
// and to determine when the next check must be.
for (HttpExchange exchange : exchanges)
{
HttpRequest request = exchange.getRequest();
long expiresAt = request.getTimeoutAt();
if (expiresAt == -1)
continue;
if (expiresAt <= now)
request.abort(new TimeoutException("Total timeout " + request.getTimeout() + " ms elapsed"));
else if (expiresAt < nextExpiresAt)
nextExpiresAt = expiresAt;
}

if (nextExpiresAt < Long.MAX_VALUE && client.isRunning())
schedule(nextExpiresAt);
}

private void schedule(long expiresAt)
{
// Schedule a timeout for the soonest any known exchange can expire.
// If subsequently that exchange is removed from the queue, the
// timeout is not cancelled, instead the entire queue is swept
// for expired exchanges and a new timeout is set.
long timeoutAt = nextTimeout.getAndUpdate(e -> Math.min(e, expiresAt));
if (timeoutAt != expiresAt)
{
long delay = expiresAt - System.nanoTime();
if (LOG.isDebugEnabled())
LOG.debug("Scheduled timeout in {} ms", TimeUnit.NANOSECONDS.toMillis(delay));
if (delay <= 0)
onTimeoutExpired();
else
schedule(delay, TimeUnit.NANOSECONDS);
}
}
}
}
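The TimeoutTask above coalesces the deadlines of all queued exchanges into a single timer: schedule() tracks the earliest known deadline in an AtomicLong and only arms the underlying timeout when a sooner deadline arrives; on expiry it sweeps the queue and re-arms. A standalone sketch of that pattern, using a plain ScheduledExecutorService instead of Jetty's CyclicTimeout (class and method names are illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class EarliestDeadlineTimer
{
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final AtomicLong nextTimeout = new AtomicLong(Long.MAX_VALUE);

    // Called once per new deadline; arms the timer only when the deadline is the new minimum.
    public void schedule(long expiresAtNanos)
    {
        long earliest = nextTimeout.getAndUpdate(e -> Math.min(e, expiresAtNanos));
        if (expiresAtNanos < earliest)
        {
            long delay = expiresAtNanos - System.nanoTime();
            scheduler.schedule(this::onTimeoutExpired, Math.max(delay, 0), TimeUnit.NANOSECONDS);
        }
    }

    private void onTimeoutExpired()
    {
        nextTimeout.set(Long.MAX_VALUE);
        // Sweep the queued items here, abort the expired ones, and call schedule()
        // again with the next soonest deadline, much like TimeoutTask.onTimeoutExpired() above.
    }
}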
@@ -78,6 +78,7 @@ public class HttpRequest implements Request
private HttpVersion version = HttpVersion.HTTP_1_1;
private long idleTimeout;
private long timeout;
private long timeoutAt;
private ContentProvider content;
private boolean followRedirects;
private List<HttpCookie> cookies;

@@ -677,14 +678,31 @@ public class HttpRequest implements Request

try
{
long timeout = getTimeout();
if (timeout <= 0)
return listener.get();
return listener.get();
}
catch (ExecutionException x)
{
// Previously this method used a timed get on the future, which was in a race
// with the timeouts implemented in HttpDestination and HttpConnection. The change to
// make those timeouts relative to the timestamp taken in sent() has made that race
// less certain, so a timeout could be either a TimeoutException from the get() or
// an ExecutionException(TimeoutException) from the HttpDestination/HttpConnection.
// We now do not do a timed get and just rely on the HttpDestination/HttpConnection
// timeouts. This has the effect of changing this method from mostly throwing a
// TimeoutException to always throwing an ExecutionException(TimeoutException).
// Thus for backwards compatibility we unwrap the timeout exception here.
if (x.getCause() instanceof TimeoutException)
{
TimeoutException t = (TimeoutException) (x.getCause());
abort(t);
throw t;
}

return listener.get(timeout, TimeUnit.MILLISECONDS);
abort(x);
throw x;
}
catch (Throwable x)
{
{
// Differently from the Future, the semantic of this method is that if
// the send() is interrupted or times out, we abort the request.
abort(x);

@@ -695,34 +713,32 @@ public class HttpRequest implements Request
@Override
public void send(Response.CompleteListener listener)
{
TimeoutCompleteListener timeoutListener = null;
try
{
if (getTimeout() > 0)
{
timeoutListener = new TimeoutCompleteListener(this);
timeoutListener.schedule(client.getScheduler());
responseListeners.add(timeoutListener);
}
send(this, listener);
}
catch (Throwable x)
{
// Do not leak the scheduler task if we
// can't even start sending the request.
if (timeoutListener != null)
timeoutListener.cancel();
throw x;
}
send(this, listener);
}

private void send(HttpRequest request, Response.CompleteListener listener)
{
if (listener != null)
responseListeners.add(listener);
sent();
client.send(request, responseListeners);
}

void sent()
{
long timeout = getTimeout();
timeoutAt = timeout > 0 ? System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeout) : -1;
}

/**
* @return The nanoTime at which the timeout expires or -1 if there is no timeout.
* @see #timeout(long, TimeUnit)
*/
long getTimeoutAt()
{
return timeoutAt;
}

protected List<Response.ResponseListener> getResponseListeners()
{
return responseListeners;
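From the caller's perspective the total timeout is still set with Request.timeout(); what changes is that the deadline is captured once in sent() as an absolute nanoTime (timeoutAt) and enforced by HttpDestination/HttpConnection, while a blocking send() keeps throwing an unwrapped TimeoutException for backwards compatibility. A small usage sketch (placeholder URL, assumes a deliberately slow server):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.api.ContentResponse;

public class TotalTimeoutExample
{
    public static void main(String[] args) throws Exception
    {
        HttpClient client = new HttpClient();
        client.start();
        try
        {
            ContentResponse response = client.newRequest("http://localhost:8080/slow")
                .timeout(500, TimeUnit.MILLISECONDS) // total timeout for the whole exchange
                .send();
            System.out.println(response.getStatus());
        }
        catch (TimeoutException x)
        {
            // The request was aborted because the total timeout elapsed.
        }
        finally
        {
            client.stop();
        }
    }
}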
@@ -25,60 +25,56 @@ import java.util.concurrent.atomic.AtomicReference;
import org.eclipse.jetty.client.api.Request;
import org.eclipse.jetty.client.api.Response;
import org.eclipse.jetty.client.api.Result;
import org.eclipse.jetty.io.CyclicTimeout;
import org.eclipse.jetty.util.component.Destroyable;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.thread.Scheduler;

public class TimeoutCompleteListener implements Response.CompleteListener, Runnable
public class TimeoutCompleteListener extends CyclicTimeout implements Response.CompleteListener
{
private static final Logger LOG = Log.getLogger(TimeoutCompleteListener.class);

private final AtomicReference<Scheduler.Task> task = new AtomicReference<>();
private final Request request;
private final AtomicReference<Request> request = new AtomicReference<>();

public TimeoutCompleteListener(Request request)
public TimeoutCompleteListener(Scheduler scheduler)
{
this.request = request;
super(scheduler);
}

@Override
public void onTimeoutExpired()
{
Request request = this.request.getAndSet(null);
if (LOG.isDebugEnabled())
LOG.debug("Total timeout {} ms elapsed for {}", request.getTimeout(), request);
if (request != null)
request.abort(new TimeoutException("Total timeout " + request.getTimeout() + " ms elapsed"));
}

@Override
public void onComplete(Result result)
{
cancel();
}

public boolean schedule(Scheduler scheduler)
{
long timeout = request.getTimeout();
Scheduler.Task task = scheduler.schedule(this, timeout, TimeUnit.MILLISECONDS);
Scheduler.Task existing = this.task.getAndSet(task);
if (existing != null)
Request request = this.request.getAndSet(null);
if (request != null)
{
existing.cancel();
cancel();
throw new IllegalStateException();
}
if (LOG.isDebugEnabled())
LOG.debug("Scheduled timeout task {} in {} ms for {}", task, timeout, request);
return true;
}

@Override
public void run()
{
if (LOG.isDebugEnabled())
LOG.debug("Executing timeout task {} for {}", task, request);
request.abort(new TimeoutException("Total timeout " + request.getTimeout() + " ms elapsed"));
}

public void cancel()
{
Scheduler.Task task = this.task.getAndSet(null);
if (task != null)
{
boolean cancelled = task.cancel();
boolean cancelled = cancel();
if (LOG.isDebugEnabled())
LOG.debug("Cancelled (successfully: {}) timeout task {}", cancelled, task);
LOG.debug("Cancelled ({}) timeout for {}", cancelled, request);
}
}

void schedule(HttpRequest request, long timeoutAt)
{
if (this.request.compareAndSet(null, request))
{
long delay = timeoutAt - System.nanoTime();
if (LOG.isDebugEnabled())
LOG.debug("Scheduled timeout in {} ms for {}", TimeUnit.NANOSECONDS.toMillis(delay), request);
if (delay <= 0)
onTimeoutExpired();
else
schedule(delay, TimeUnit.NANOSECONDS);
}
}
}
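TimeoutCompleteListener is now itself a CyclicTimeout, so one instance per channel can be re-armed for successive exchanges instead of allocating a Scheduler.Task per request. A minimal standalone sketch of the CyclicTimeout pattern it builds on (assumes jetty-io and jetty-util on the classpath; the printed message is illustrative):

import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.CyclicTimeout;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.util.thread.Scheduler;

public class CyclicTimeoutExample
{
    static class ExpiryLogger extends CyclicTimeout
    {
        ExpiryLogger(Scheduler scheduler)
        {
            super(scheduler);
        }

        @Override
        public void onTimeoutExpired()
        {
            System.out.println("timeout expired");
        }
    }

    public static void main(String[] args) throws Exception
    {
        Scheduler scheduler = new ScheduledExecutorScheduler();
        scheduler.start();
        ExpiryLogger timeout = new ExpiryLogger(scheduler);
        timeout.schedule(100, TimeUnit.MILLISECONDS); // arm
        timeout.cancel();                             // disarm before it fires
        timeout.schedule(100, TimeUnit.MILLISECONDS); // re-arm the same instance
        Thread.sleep(500);
        timeout.destroy();
        scheduler.stop();
    }
}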
@@ -79,14 +79,10 @@ public class HttpChannelOverHTTP extends HttpChannel
}

@Override
public void send()
public void send(HttpExchange exchange)
{
HttpExchange exchange = getHttpExchange();
if (exchange != null)
{
outMessages.increment();
sender.send( exchange );
}
outMessages.increment();
sender.send( exchange );
}

@Override
@@ -187,9 +187,8 @@ public class HttpConnectionOverHTTP extends AbstractConnection implements Connec
if (closed.compareAndSet(false, true))
{
getHttpDestination().close(this);

abort(failure);

channel.destroy();
getEndPoint().shutdownOutput();
if (LOG.isDebugEnabled())
LOG.debug("Shutdown {}", this);
@@ -26,6 +26,7 @@ import org.eclipse.jetty.client.HttpExchange;
import org.eclipse.jetty.client.HttpReceiver;
import org.eclipse.jetty.client.HttpResponse;
import org.eclipse.jetty.client.HttpResponseException;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.http.HttpField;
import org.eclipse.jetty.http.HttpMethod;
import org.eclipse.jetty.http.HttpParser;

@@ -38,13 +39,14 @@ import org.eclipse.jetty.util.CompletableCallback;

public class HttpReceiverOverHTTP extends HttpReceiver implements HttpParser.ResponseHandler
{
private final HttpParser parser = new HttpParser(this);
private final HttpParser parser;
private ByteBuffer buffer;
private boolean shutdown;

public HttpReceiverOverHTTP(HttpChannelOverHTTP channel)
{
super(channel);
parser = new HttpParser(this, -1, channel.getHttpDestination().getHttpClient().getHttpCompliance());
}

@Override
@@ -689,9 +689,8 @@ public class HttpClientTest extends AbstractHttpClientServerTest
.send();
Assert.fail();
}
catch (ExecutionException expected)
catch (TimeoutException expected)
{
Assert.assertTrue(expected.getCause() instanceof TimeoutException);
}

// Make another request without specifying the idle timeout, should not fail
@@ -172,9 +172,7 @@ public class HttpClientUploadDuringServerShutdown
public void send()
{
if (afterSetup.get())
{
associateLatch.countDown();
}
super.send();
}
};
@@ -18,6 +18,7 @@

package org.eclipse.jetty.client;

import java.nio.channels.Selector;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

@@ -29,7 +30,6 @@ import org.eclipse.jetty.server.Handler;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.SocketAddressResolver;
import org.eclipse.jetty.util.thread.Invocable;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.junit.After;
import org.junit.Assert;

@@ -89,10 +89,10 @@ public class LivelockTest
AtomicBoolean busy = new AtomicBoolean(true);

ManagedSelector clientSelector = client.getContainedBeans(ManagedSelector.class).stream().findAny().get();
Runnable clientLivelock = new Invocable.NonBlocking()
ManagedSelector.SelectorUpdate clientLivelock = new ManagedSelector.SelectorUpdate()
{
@Override
public void run()
public void update(Selector selector)
{
sleep(10);
if (busy.get())

@@ -102,10 +102,10 @@ public class LivelockTest
clientSelector.submit(clientLivelock);

ManagedSelector serverSelector = connector.getContainedBeans(ManagedSelector.class).stream().findAny().get();
Runnable serverLivelock = new Invocable.NonBlocking()
ManagedSelector.SelectorUpdate serverLivelock = new ManagedSelector.SelectorUpdate()
{
@Override
public void run()
public void update(Selector selector)
{
sleep(10);
if (busy.get())
@@ -20,6 +20,8 @@ package org.eclipse.jetty.client.http;

import java.io.EOFException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

@@ -30,9 +32,9 @@ import org.eclipse.jetty.client.HttpExchange;
import org.eclipse.jetty.client.HttpRequest;
import org.eclipse.jetty.client.HttpResponseException;
import org.eclipse.jetty.client.Origin;
import org.eclipse.jetty.client.api.Connection;
import org.eclipse.jetty.client.api.Response;
import org.eclipse.jetty.client.util.FutureResponseListener;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpHeader;
import org.eclipse.jetty.http.HttpVersion;

@@ -44,21 +46,38 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class HttpReceiverOverHTTPTest
{
{
@Rule
public final TestTracker tracker = new TestTracker();

@Parameterized.Parameter(0)
public HttpCompliance compliance;

private HttpClient client;
private HttpDestinationOverHTTP destination;
private ByteArrayEndPoint endPoint;
private HttpConnectionOverHTTP connection;

@Parameterized.Parameters
public static Collection<Object[]> parameters() throws Exception
{
return Arrays.asList(
new Object[] { HttpCompliance.LEGACY },
new Object[] { HttpCompliance.RFC2616_LEGACY },
new Object[] { HttpCompliance.RFC7230_LEGACY }
);
}

@Before
public void init() throws Exception
{
client = new HttpClient();
client.setHttpCompliance(compliance);
client.start();
destination = new HttpDestinationOverHTTP(client, new Origin("http", "localhost", 8080));
destination.start();
@@ -82,15 +82,11 @@ public class HttpChannelOverFCGI extends HttpChannel
}

@Override
public void send()
public void send(HttpExchange exchange)
{
HttpExchange exchange = getHttpExchange();
if (exchange != null)
{
version = exchange.getRequest().getVersion();
idle.onOpen();
sender.send(exchange);
}
version = exchange.getRequest().getVersion();
idle.onOpen();
sender.send(exchange);
}

@Override
@@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

import org.eclipse.jetty.client.HttpChannel;
import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.HttpConnection;
import org.eclipse.jetty.client.HttpDestination;

@@ -207,11 +208,17 @@ public class HttpConnectionOverFCGI extends AbstractConnection implements Connec

protected void release(HttpChannelOverFCGI channel)
{
if (activeChannels.remove(channel.getRequest()) != null)
if (activeChannels.remove(channel.getRequest()) == null)
{
channel.destroy();
}
else
{
channel.setRequest(0);
// Recycle only non-failed channels.
if (!channel.isFailed())
if (channel.isFailed())
channel.destroy();
else
idleChannels.offer(channel);
destination.release(this);
}

@@ -263,16 +270,27 @@ public class HttpConnectionOverFCGI extends AbstractConnection implements Connec
HttpExchange exchange = channel.getHttpExchange();
if (exchange != null)
exchange.getRequest().abort(failure);
channel.destroy();
}
activeChannels.clear();
idleChannels.clear();

HttpChannel channel = idleChannels.poll();
while (channel!=null)
{
channel.destroy();
channel = idleChannels.poll();
}
}

private void failAndClose(Throwable failure)
{
boolean result = false;
for (HttpChannelOverFCGI channel : activeChannels.values())
{
result |= channel.responseFailure(failure);
channel.destroy();
}

if (result)
close(failure);
}

@@ -296,7 +314,7 @@ public class HttpConnectionOverFCGI extends AbstractConnection implements Connec
}
}

protected HttpChannelOverFCGI provideHttpChannel(int id, Request request)
protected HttpChannelOverFCGI acquireHttpChannel(int id, Request request)
{
HttpChannelOverFCGI channel = idleChannels.poll();
if (channel == null)

@@ -335,7 +353,7 @@ public class HttpConnectionOverFCGI extends AbstractConnection implements Connec

// FCGI may be multiplexed, so one channel for each exchange.
int id = acquireRequest();
HttpChannelOverFCGI channel = provideHttpChannel(id, request);
HttpChannelOverFCGI channel = acquireHttpChannel(id, request);
activeChannels.put(id, channel);

return send(channel, exchange);
@@ -442,9 +442,8 @@ public class HttpClientTest extends AbstractHttpClientServerTest
.send();
Assert.fail();
}
catch (ExecutionException expected)
catch (TimeoutException expected)
{
Assert.assertTrue(expected.getCause() instanceof TimeoutException);
}

// Make another request without specifying the idle timeout, should not fail
@@ -18,15 +18,175 @@

package org.eclipse.jetty.http;

import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;

import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;

/**
* HTTP compliance modes:
* <dl>
* <dt>RFC7230</dt><dd>(default) Compliance with RFC7230</dd>
* <dt>RFC2616</dt><dd>Wrapped/Continued headers and HTTP/0.9 supported</dd>
* <dt>LEGACY</dt><dd>(aka STRICT) Adherence to Servlet Specification requirement for
* exact case of header names, bypassing the header caches, which are case insensitive,
* otherwise equivalent to RFC2616</dd>
* HTTP compliance modes for Jetty HTTP parsing and handling.
* A Compliance mode consists of a set of {@link HttpComplianceSection}s which are applied
* when the mode is enabled.
* <p>
* Currently the set of modes is an enum and cannot be dynamically extended, but future major releases may convert this
* to a class. To modify modes there are four custom modes that can be modified by setting the property
* <code>org.eclipse.jetty.http.HttpCompliance.CUSTOMn</code> (where 'n' is '0', '1', '2' or '3'), to a comma separated
* list of sections. The list should start with one of the following strings:<dl>
* <dt>0</dt><dd>No {@link HttpComplianceSection}s</dd>
* <dt>*</dt><dd>All {@link HttpComplianceSection}s</dd>
* <dt>RFC2616</dt><dd>The set of {@link HttpComplianceSection}s applicable to https://tools.ietf.org/html/rfc2616,
* but not https://tools.ietf.org/html/rfc7230</dd>
* <dt>RFC7230</dt><dd>The set of {@link HttpComplianceSection}s applicable to https://tools.ietf.org/html/rfc7230</dd>
* </dl>
* The remainder of the list can contain the names of {@link HttpComplianceSection}s to include them in the mode, or prefixed
* with a '-' to exclude them from the mode. Note that Jetty's modes may have some historic minor differences from the strict
* RFC compliance, for example the <code>RFC2616_LEGACY</code> HttpCompliance is defined as
* <code>RFC2616,-FIELD_COLON,-METHOD_CASE_SENSITIVE</code>.
* <p>
* Note also that the {@link EnumSet} returned by {@link HttpCompliance#sections()} is mutable, so that modes may
* be altered in code and will affect all usages of the mode.
*/
public enum HttpCompliance { LEGACY, RFC2616, RFC7230 }
public enum HttpCompliance // TODO in Jetty-10 convert this enum to a class so that extra custom modes can be defined dynamically
{
/** A Legacy compliance mode to match jetty's behavior prior to RFC2616 and RFC7230. It only
* contains {@link HttpComplianceSection#METHOD_CASE_SENSITIVE}
*/
LEGACY(sectionsBySpec("0,METHOD_CASE_SENSITIVE")),

/** The legacy RFC2616 support, which incorrectly excludes
* {@link HttpComplianceSection#METHOD_CASE_SENSITIVE}, {@link HttpComplianceSection#FIELD_COLON}
*/
RFC2616_LEGACY(sectionsBySpec("RFC2616,-FIELD_COLON,-METHOD_CASE_SENSITIVE")),

/** The strict RFC2616 support mode */
RFC2616(sectionsBySpec("RFC2616")),

/** Jetty's current RFC7230 support, which incorrectly excludes {@link HttpComplianceSection#METHOD_CASE_SENSITIVE} */
RFC7230_LEGACY(sectionsBySpec("RFC7230,-METHOD_CASE_SENSITIVE")),

/** The RFC7230 support mode */
RFC7230(sectionsBySpec("RFC7230")),

/** Custom compliance mode that can be defined with System property <code>org.eclipse.jetty.http.HttpCompliance.CUSTOM0</code> */
@Deprecated
CUSTOM0(sectionsByProperty("CUSTOM0")),
/** Custom compliance mode that can be defined with System property <code>org.eclipse.jetty.http.HttpCompliance.CUSTOM1</code> */
@Deprecated
CUSTOM1(sectionsByProperty("CUSTOM1")),
/** Custom compliance mode that can be defined with System property <code>org.eclipse.jetty.http.HttpCompliance.CUSTOM2</code> */
@Deprecated
CUSTOM2(sectionsByProperty("CUSTOM2")),
/** Custom compliance mode that can be defined with System property <code>org.eclipse.jetty.http.HttpCompliance.CUSTOM3</code> */
@Deprecated
CUSTOM3(sectionsByProperty("CUSTOM3"));

private static final Logger LOG = Log.getLogger(HttpParser.class);
private static EnumSet<HttpComplianceSection> sectionsByProperty(String property)
{
String s = System.getProperty(HttpCompliance.class.getName()+property);
return sectionsBySpec(s==null?"*":s);
}

static EnumSet<HttpComplianceSection> sectionsBySpec(String spec)
{
EnumSet<HttpComplianceSection> sections;
String[] elements = spec.split("\\s*,\\s*");
int i=0;

switch(elements[i])
{
case "0":
sections = EnumSet.noneOf(HttpComplianceSection.class);
i++;
break;

case "*":
i++;
sections = EnumSet.allOf(HttpComplianceSection.class);
break;

case "RFC2616":
sections = EnumSet.complementOf(EnumSet.of(
HttpComplianceSection.NO_FIELD_FOLDING,
HttpComplianceSection.NO_HTTP_9));
i++;
break;

case "RFC7230":
i++;
sections = EnumSet.allOf(HttpComplianceSection.class);
break;

default:
sections = EnumSet.noneOf(HttpComplianceSection.class);
break;
}

while(i<elements.length)
{
String element = elements[i++];
boolean exclude = element.startsWith("-");
if (exclude)
element = element.substring(1);
HttpComplianceSection section = HttpComplianceSection.valueOf(element);
if (section==null)
{
LOG.warn("Unknown section '"+element+"' in HttpCompliance spec: "+spec);
continue;
}
if (exclude)
sections.remove(section);
else
sections.add(section);

}

return sections;
}

private final static Map<HttpComplianceSection,HttpCompliance> __required = new HashMap<>();
static
{
for (HttpComplianceSection section : HttpComplianceSection.values())
{
for (HttpCompliance compliance : HttpCompliance.values())
{
if (compliance.sections().contains(section))
{
__required.put(section,compliance);
break;
}
}
}
}

/**
* @param section The section to query
* @return The minimum compliance required to enable the section.
*/
public static HttpCompliance requiredCompliance(HttpComplianceSection section)
{
return __required.get(section);
}

private final EnumSet<HttpComplianceSection> _sections;

private HttpCompliance(EnumSet<HttpComplianceSection> sections)
{
_sections = sections;
}

/**
* Get the set of {@link HttpComplianceSection}s supported by this compliance mode. This set
* is mutable, so it can be modified. Any modification will affect all usages of the mode
* within the same {@link ClassLoader}.
* @return The set of {@link HttpComplianceSection}s supported by this compliance mode.
*/
public EnumSet<HttpComplianceSection> sections()
{
return _sections;
}

}
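Two ways the custom modes described in the javadoc above can be shaped: with the system property before the enum is first loaded, or by mutating the deliberately mutable section set in code, which is exactly what HttpParserTest does further down in this commit. A small sketch (the chosen section is only an example):

import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.http.HttpComplianceSection;

public class CustomComplianceExample
{
    public static void main(String[] args)
    {
        // Option 1: configure before HttpCompliance is first loaded, e.g. on the command line:
        //   -Dorg.eclipse.jetty.http.HttpCompliance.CUSTOM0=RFC7230,-NO_WS_AFTER_FIELD_NAME

        // Option 2: mutate the section set in code; this affects every user of CUSTOM0
        // in the same ClassLoader.
        HttpCompliance.CUSTOM0.sections().remove(HttpComplianceSection.NO_WS_AFTER_FIELD_NAME);

        System.out.println(HttpCompliance.CUSTOM0.sections());
    }
}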
@@ -0,0 +1,53 @@
//
// ========================================================================
// Copyright (c) 1995-2018 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//

package org.eclipse.jetty.http;

/**
*/
public enum HttpComplianceSection
{
CASE_INSENSITIVE_FIELD_VALUE_CACHE("","Use case insensitive field value cache"),
METHOD_CASE_SENSITIVE("https://tools.ietf.org/html/rfc7230#section-3.1.1","Method is case-sensitive"),
FIELD_COLON("https://tools.ietf.org/html/rfc7230#section-3.2","Fields must have a Colon"),
FIELD_NAME_CASE_INSENSITIVE("https://tools.ietf.org/html/rfc7230#section-3.2","Field name is case-insensitive"),
NO_WS_AFTER_FIELD_NAME("https://tools.ietf.org/html/rfc7230#section-3.2.4","Whitespace not allowed after field name"),
NO_FIELD_FOLDING("https://tools.ietf.org/html/rfc7230#section-3.2.4","No line Folding"),
NO_HTTP_9("https://tools.ietf.org/html/rfc7230#appendix-A.2","No HTTP/0.9"),
;

final String url;
final String description;

HttpComplianceSection(String url,String description)
{
this.url = url;
this.description = description;
}

public String getURL()
{
return url;
}

public String getDescription()
{
return description;
}

}
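A quick way to inspect what each section covers and which predefined mode is the first to enforce it, using the requiredCompliance() helper added above (the output format is illustrative):

import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.http.HttpComplianceSection;

public class ListComplianceSections
{
    public static void main(String[] args)
    {
        for (HttpComplianceSection section : HttpComplianceSection.values())
        {
            System.out.printf("%-40s %-16s %s%n",
                section,
                HttpCompliance.requiredCompliance(section),
                section.getDescription());
        }
    }
}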
@@ -20,6 +20,7 @@ package org.eclipse.jetty.http;

import java.nio.ByteBuffer;

import org.eclipse.jetty.util.ArrayTernaryTrie;
import org.eclipse.jetty.util.ArrayTrie;
import org.eclipse.jetty.util.StringUtil;
import org.eclipse.jetty.util.Trie;

@@ -132,7 +133,15 @@ public enum HttpMethod
}

/* ------------------------------------------------------------ */
public final static Trie<HttpMethod> CACHE= new ArrayTrie<>();
public final static Trie<HttpMethod> INSENSITIVE_CACHE= new ArrayTrie<>();
static
{
for (HttpMethod method : HttpMethod.values())
INSENSITIVE_CACHE.put(method.toString(),method);
}

/* ------------------------------------------------------------ */
public final static Trie<HttpMethod> CACHE= new ArrayTernaryTrie<>(false);
static
{
for (HttpMethod method : HttpMethod.values())
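The method lookup is split into two tries: CACHE becomes a case-sensitive ArrayTernaryTrie, so a lower-cased method no longer silently matches, while INSENSITIVE_CACHE keeps the old case-insensitive ArrayTrie behaviour for the legacy modes. A tiny sketch of the difference (the commented results assume this commit's definitions):

import org.eclipse.jetty.http.HttpMethod;

public class MethodCacheExample
{
    public static void main(String[] args)
    {
        System.out.println(HttpMethod.CACHE.get("GET"));             // GET
        System.out.println(HttpMethod.CACHE.get("get"));             // null, case-sensitive lookup
        System.out.println(HttpMethod.INSENSITIVE_CACHE.get("get")); // GET, case-insensitive lookup
    }
}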
@@ -35,9 +35,6 @@ import org.eclipse.jetty.util.Utf8StringBuilder;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;

import static org.eclipse.jetty.http.HttpCompliance.LEGACY;
import static org.eclipse.jetty.http.HttpCompliance.RFC2616;
import static org.eclipse.jetty.http.HttpCompliance.RFC7230;
import static org.eclipse.jetty.http.HttpTokens.CARRIAGE_RETURN;
import static org.eclipse.jetty.http.HttpTokens.LINE_FEED;
import static org.eclipse.jetty.http.HttpTokens.SPACE;

@@ -114,6 +111,7 @@ public class HttpParser
IN_NAME,
VALUE,
IN_VALUE,
WS_AFTER_NAME,
}

// States

@@ -152,6 +150,7 @@ public class HttpParser
private final ComplianceHandler _complianceHandler;
private final int _maxHeaderBytes;
private final HttpCompliance _compliance;
private final EnumSet<HttpComplianceSection> _compliances;
private HttpField _field;
private HttpHeader _header;
private String _headerString;

@@ -189,16 +188,21 @@ public class HttpParser
CACHE.put(new HttpField(HttpHeader.CONNECTION,HttpHeaderValue.UPGRADE));
CACHE.put(new HttpField(HttpHeader.ACCEPT_ENCODING,"gzip"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_ENCODING,"gzip, deflate"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_ENCODING,"gzip, deflate, br"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_ENCODING,"gzip,deflate,sdch"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_LANGUAGE,"en-US,en;q=0.5"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_LANGUAGE,"en-GB,en-US;q=0.8,en;q=0.6"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_LANGUAGE,"en-AU,en;q=0.9,it-IT;q=0.8,it;q=0.7,en-GB;q=0.6,en-US;q=0.5"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_CHARSET,"ISO-8859-1,utf-8;q=0.7,*;q=0.3"));
CACHE.put(new HttpField(HttpHeader.ACCEPT,"*/*"));
CACHE.put(new HttpField(HttpHeader.ACCEPT,"image/png,image/*;q=0.8,*/*;q=0.5"));
CACHE.put(new HttpField(HttpHeader.ACCEPT,"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"));
CACHE.put(new HttpField(HttpHeader.ACCEPT,"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"));
CACHE.put(new HttpField(HttpHeader.ACCEPT_RANGES,HttpHeaderValue.BYTES));
CACHE.put(new HttpField(HttpHeader.PRAGMA,"no-cache"));
CACHE.put(new HttpField(HttpHeader.CACHE_CONTROL,"private, no-cache, no-cache=Set-Cookie, proxy-revalidate"));
CACHE.put(new HttpField(HttpHeader.CACHE_CONTROL,"no-cache"));
CACHE.put(new HttpField(HttpHeader.CACHE_CONTROL,"max-age=0"));
CACHE.put(new HttpField(HttpHeader.CONTENT_LENGTH,"0"));
CACHE.put(new HttpField(HttpHeader.CONTENT_ENCODING,"gzip"));
CACHE.put(new HttpField(HttpHeader.CONTENT_ENCODING,"deflate"));

@@ -270,23 +274,25 @@ public class HttpParser
/* ------------------------------------------------------------------------------- */
public HttpParser(RequestHandler handler,int maxHeaderBytes,HttpCompliance compliance)
{
_handler=handler;
_requestHandler=handler;
_responseHandler=null;
_maxHeaderBytes=maxHeaderBytes;
_compliance=compliance==null?compliance():compliance;
_complianceHandler=(ComplianceHandler)(handler instanceof ComplianceHandler?handler:null);
this(handler,null,maxHeaderBytes,compliance==null?compliance():compliance);
}

/* ------------------------------------------------------------------------------- */
public HttpParser(ResponseHandler handler,int maxHeaderBytes,HttpCompliance compliance)
{
_handler=handler;
_requestHandler=null;
_responseHandler=handler;
this(null,handler,maxHeaderBytes,compliance==null?compliance():compliance);
}

/* ------------------------------------------------------------------------------- */
private HttpParser(RequestHandler requestHandler,ResponseHandler responseHandler,int maxHeaderBytes,HttpCompliance compliance)
{
_handler=requestHandler!=null?requestHandler:responseHandler;
_requestHandler=requestHandler;
_responseHandler=responseHandler;
_maxHeaderBytes=maxHeaderBytes;
_compliance=compliance==null?compliance():compliance;
_complianceHandler=(ComplianceHandler)(handler instanceof ComplianceHandler?handler:null);
_compliance=compliance;
_compliances=compliance.sections();
_complianceHandler=(ComplianceHandler)(_handler instanceof ComplianceHandler?_handler:null);
}

/* ------------------------------------------------------------------------------- */

@@ -297,27 +303,36 @@ public class HttpParser

/* ------------------------------------------------------------------------------- */
/** Check RFC compliance violation
* @param compliance The compliance level violated
* @param violation The compliance section violation
* @param reason The reason for the violation
* @return True if the current compliance level is set so as to Not allow this violation
*/
protected boolean complianceViolation(HttpCompliance compliance,String reason)
protected boolean complianceViolation(HttpComplianceSection violation, String reason)
{
if (_complianceHandler==null)
return _compliance.ordinal()>=compliance.ordinal();
if (_compliance.ordinal()<compliance.ordinal())
{
_complianceHandler.onComplianceViolation(_compliance,compliance,reason);
return false;
}
return true;
if (_compliances.contains(violation))
return true;

if (_complianceHandler!=null)
_complianceHandler.onComplianceViolation(_compliance,violation,reason);

return false;
}

/* ------------------------------------------------------------------------------- */
protected void handleViolation(HttpComplianceSection section,String reason)
{
if (_complianceHandler!=null)
_complianceHandler.onComplianceViolation(_compliance,section,reason);
}

/* ------------------------------------------------------------------------------- */
protected String caseInsensitiveHeader(String orig, String normative)
{
return (_compliance!=LEGACY || orig.equals(normative) || complianceViolation(RFC2616,"https://tools.ietf.org/html/rfc2616#section-4.2 case sensitive header: "+orig))
?normative:orig;
if (_compliances.contains(HttpComplianceSection.FIELD_NAME_CASE_INSENSITIVE))
return normative;
if (!orig.equals(normative))
handleViolation(HttpComplianceSection.FIELD_NAME_CASE_INSENSITIVE,orig);
return orig;
}

/* ------------------------------------------------------------------------------- */

@@ -631,26 +646,24 @@ public class HttpParser
_length=_string.length();
_methodString=takeString();

// TODO #1966 This cache lookup is case insensitive when it should be case sensitive by RFC2616, RFC7230
HttpMethod method=HttpMethod.CACHE.get(_methodString);
if (method!=null)
if (_compliances.contains(HttpComplianceSection.METHOD_CASE_SENSITIVE))
{
switch(_compliance)
{
case LEGACY:
// Legacy correctly allows case sensitive header;
break;

case RFC2616:
case RFC7230:
if (!method.asString().equals(_methodString) && _complianceHandler!=null)
_complianceHandler.onComplianceViolation(_compliance,HttpCompliance.LEGACY,
"https://tools.ietf.org/html/rfc7230#section-3.1.1 case insensitive method "+_methodString);
// TODO Good to used cached version for faster equals checking, but breaks case sensitivity because cache is insensitive
_methodString = method.asString();
break;
}
HttpMethod method=HttpMethod.CACHE.get(_methodString);
if (method!=null)
_methodString = method.asString();
}
else
{
HttpMethod method=HttpMethod.INSENSITIVE_CACHE.get(_methodString);

if (method!=null)
{
if (!method.asString().equals(_methodString))
handleViolation(HttpComplianceSection.METHOD_CASE_SENSITIVE,_methodString);
_methodString = method.asString();
}
}

setState(State.SPACE1);
}
else if (b < SPACE)

@@ -751,7 +764,7 @@ public class HttpParser
else if (b < HttpTokens.SPACE && b>=0)
{
// HTTP/0.9
if (complianceViolation(RFC7230,"https://tools.ietf.org/html/rfc7230#appendix-A.2 HTTP/0.9"))
if (complianceViolation(HttpComplianceSection.NO_HTTP_9,"No request version"))
throw new BadMessageException("HTTP/0.9 not supported");
handle=_requestHandler.startRequest(_methodString,_uri.toString(), HttpVersion.HTTP_0_9);
setState(State.END);

@@ -818,7 +831,7 @@ public class HttpParser
else
{
// HTTP/0.9
if (complianceViolation(RFC7230,"https://tools.ietf.org/html/rfc7230#appendix-A.2 HTTP/0.9"))
if (complianceViolation(HttpComplianceSection.NO_HTTP_9,"No request version"))
throw new BadMessageException("HTTP/0.9 not supported");

handle=_requestHandler.startRequest(_methodString,_uri.toString(), HttpVersion.HTTP_0_9);

@@ -936,7 +949,9 @@ public class HttpParser
_host=true;
if (!(_field instanceof HostPortHttpField) && _valueString!=null && !_valueString.isEmpty())
{
_field=new HostPortHttpField(_header,caseInsensitiveHeader(_headerString,_header.asString()),_valueString);
_field=new HostPortHttpField(_header,
_compliances.contains(HttpComplianceSection.FIELD_NAME_CASE_INSENSITIVE)?_header.asString():_headerString,
_valueString);
add_to_connection_trie=_fieldCache!=null;
}
break;

@@ -1033,7 +1048,7 @@ public class HttpParser
case HttpTokens.SPACE:
case HttpTokens.TAB:
{
if (complianceViolation(RFC7230,"https://tools.ietf.org/html/rfc7230#section-3.2.4 folding"))
if (complianceViolation(HttpComplianceSection.NO_FIELD_FOLDING,_headerString))
throw new BadMessageException(HttpStatus.BAD_REQUEST_400,"Header Folding");

// header value without name - continuation?

@@ -1143,36 +1158,39 @@ public class HttpParser
if (buffer.hasRemaining())
{
// Try a look ahead for the known header name and value.
HttpField field=_fieldCache==null?null:_fieldCache.getBest(buffer,-1,buffer.remaining());
if (field==null)
field=CACHE.getBest(buffer,-1,buffer.remaining());
HttpField cached_field=_fieldCache==null?null:_fieldCache.getBest(buffer,-1,buffer.remaining());
if (cached_field==null)
cached_field=CACHE.getBest(buffer,-1,buffer.remaining());

if (field!=null)
if (cached_field!=null)
{
final String n;
final String v;
String n = cached_field.getName();
String v = cached_field.getValue();

if (_compliance==LEGACY)
if (!_compliances.contains(HttpComplianceSection.FIELD_NAME_CASE_INSENSITIVE))
{
// Have to get the fields exactly from the buffer to match case
String fn=field.getName();
n=caseInsensitiveHeader(BufferUtil.toString(buffer,buffer.position()-1,fn.length(),StandardCharsets.US_ASCII),fn);
String fv=field.getValue();
if (fv==null)
v=null;
else
String en = BufferUtil.toString(buffer,buffer.position()-1,n.length(),StandardCharsets.US_ASCII);
if (!n.equals(en))
{
v=caseInsensitiveHeader(BufferUtil.toString(buffer,buffer.position()+fn.length()+1,fv.length(),StandardCharsets.ISO_8859_1),fv);
field=new HttpField(field.getHeader(),n,v);
handleViolation(HttpComplianceSection.FIELD_NAME_CASE_INSENSITIVE,en);
n = en;
cached_field = new HttpField(cached_field.getHeader(),n,v);
}
}
else

if (v!=null && !_compliances.contains(HttpComplianceSection.CASE_INSENSITIVE_FIELD_VALUE_CACHE))
{
n=field.getName();
v=field.getValue();
String ev = BufferUtil.toString(buffer,buffer.position()+n.length()+1,v.length(),StandardCharsets.ISO_8859_1);
if (!v.equals(ev))
{
handleViolation(HttpComplianceSection.CASE_INSENSITIVE_FIELD_VALUE_CACHE,ev+"!="+v);
v = ev;
cached_field = new HttpField(cached_field.getHeader(),n,v);
}
}

_header=field.getHeader();

_header=cached_field.getHeader();
_headerString=n;

if (v==null)

@@ -1192,7 +1210,7 @@ public class HttpParser

if (peek==HttpTokens.CARRIAGE_RETURN || peek==HttpTokens.LINE_FEED)
{
_field=field;
_field=cached_field;
_valueString=v;
setState(FieldState.IN_VALUE);

@@ -1221,12 +1239,28 @@ public class HttpParser
_string.setLength(0);
_string.append((char)b);
_length=1;

}
}
break;

case IN_NAME:
if (b>HttpTokens.SPACE && b!=HttpTokens.COLON)
{
if (_header!=null)
{
setString(_header.asString());
_header=null;
_headerString=null;
}

_string.append((char)b);
_length=_string.length();
break;
}

// Fallthrough

case WS_AFTER_NAME:
if (b==HttpTokens.COLON)
{
if (_headerString==null)

@@ -1239,23 +1273,8 @@ public class HttpParser
setState(FieldState.VALUE);
break;
}

if (b>HttpTokens.SPACE)
{
if (_header!=null)
{
setString(_header.asString());
_header=null;
_headerString=null;
}

_string.append((char)b);
if (b>HttpTokens.SPACE)
_length=_string.length();
break;
}

if (b==HttpTokens.LINE_FEED && !complianceViolation(RFC7230,"https://tools.ietf.org/html/rfc7230#section-3.2 No colon"))
if (b==HttpTokens.LINE_FEED)
{
if (_headerString==null)
{

@@ -1266,7 +1285,17 @@ public class HttpParser
_valueString="";
_length=-1;

setState(FieldState.FIELD);
if (!complianceViolation(HttpComplianceSection.FIELD_COLON,_headerString))
{
setState(FieldState.FIELD);
break;
}
}

//Ignore trailing whitespaces
if (b==HttpTokens.SPACE && !complianceViolation(HttpComplianceSection.NO_WS_AFTER_FIELD_NAME,null))
{
setState(FieldState.WS_AFTER_NAME);
break;
}

@@ -1800,7 +1829,13 @@ public class HttpParser
/* ------------------------------------------------------------------------------- */
public interface ComplianceHandler extends HttpHandler
{
public void onComplianceViolation(HttpCompliance compliance,HttpCompliance required,String reason);
@Deprecated
public default void onComplianceViolation(HttpCompliance compliance, HttpCompliance required, String reason) {}

public default void onComplianceViolation(HttpCompliance compliance, HttpComplianceSection violation, String details)
{
onComplianceViolation(compliance,HttpCompliance.requiredCompliance(violation), details);
}
}

/* ------------------------------------------------------------------------------- */
@@ -18,9 +18,12 @@

package org.eclipse.jetty.http;

import static org.hamcrest.Matchers.contains;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.eclipse.jetty.http.HttpParser.State;

@@ -33,6 +36,11 @@ import org.junit.Test;

public class HttpParserTest
{
static
{
HttpCompliance.CUSTOM0.sections().remove(HttpComplianceSection.NO_WS_AFTER_FIELD_NAME);
}

/**
* Parse until {@link State#END} state.
* If the parser is already in the END state, then it is {@link HttpParser#reset()} and re-parsed.

@@ -112,7 +120,7 @@ public class HttpParserTest
ByteBuffer buffer = BufferUtil.toBuffer("GET /999\r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler, HttpCompliance.RFC2616);
HttpParser parser = new HttpParser(handler, HttpCompliance.RFC2616_LEGACY);
parseAll(parser, buffer);

Assert.assertNull(_bad);

@@ -120,7 +128,7 @@ public class HttpParserTest
Assert.assertEquals("/999", _uriOrStatus);
Assert.assertEquals("HTTP/0.9", _versionOrReason);
Assert.assertEquals(-1, _headers);
Assert.assertThat(_complianceViolation, Matchers.containsString("0.9"));
Assert.assertThat(_complianceViolation, contains(HttpComplianceSection.NO_HTTP_9));
}

@Test

@@ -132,7 +140,7 @@ public class HttpParserTest
HttpParser parser = new HttpParser(handler);
parseAll(parser, buffer);
Assert.assertEquals("HTTP/0.9 not supported", _bad);
Assert.assertNull(_complianceViolation);
Assert.assertThat(_complianceViolation,Matchers.empty());
}

@Test

@@ -141,7 +149,7 @@ public class HttpParserTest
ByteBuffer buffer = BufferUtil.toBuffer("POST /222 \r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler, HttpCompliance.RFC2616);
HttpParser parser = new HttpParser(handler, HttpCompliance.RFC2616_LEGACY);
parseAll(parser, buffer);

Assert.assertNull(_bad);

@@ -149,7 +157,7 @@ public class HttpParserTest
Assert.assertEquals("/222", _uriOrStatus);
Assert.assertEquals("HTTP/0.9", _versionOrReason);
Assert.assertEquals(-1, _headers);
Assert.assertThat(_complianceViolation, Matchers.containsString("0.9"));
Assert.assertThat(_complianceViolation, contains(HttpComplianceSection.NO_HTTP_9));
}

@Test

@@ -162,7 +170,7 @@ public class HttpParserTest
HttpParser parser = new HttpParser(handler);
parseAll(parser, buffer);
Assert.assertEquals("HTTP/0.9 not supported", _bad);
Assert.assertNull(_complianceViolation);
Assert.assertThat(_complianceViolation,Matchers.empty());
}

@Test

@@ -256,7 +264,7 @@ public class HttpParserTest
"\r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler, HttpCompliance.RFC2616);
HttpParser parser = new HttpParser(handler, HttpCompliance.RFC2616_LEGACY);
parseAll(parser, buffer);

Assert.assertThat(_bad, Matchers.nullValue());

@@ -265,7 +273,7 @@ public class HttpParserTest
Assert.assertEquals("Name", _hdr[1]);
Assert.assertEquals("value extra", _val[1]);
Assert.assertEquals(1, _headers);
Assert.assertThat(_complianceViolation, Matchers.containsString("folding"));
Assert.assertThat(_complianceViolation, contains(HttpComplianceSection.NO_FIELD_FOLDING));
}

@Test

@@ -279,12 +287,12 @@ public class HttpParserTest
"\r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler, 4096, HttpCompliance.RFC7230);
HttpParser parser = new HttpParser(handler, 4096, HttpCompliance.RFC7230_LEGACY);
parseAll(parser, buffer);

Assert.assertThat(_bad, Matchers.notNullValue());
Assert.assertThat(_bad, Matchers.containsString("Header Folding"));
Assert.assertNull(_complianceViolation);
Assert.assertThat(_complianceViolation,Matchers.empty());
}

@Test

@@ -297,7 +305,7 @@ public class HttpParserTest
"\r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler, 4096, HttpCompliance.RFC7230);
HttpParser parser = new HttpParser(handler, 4096, HttpCompliance.RFC7230_LEGACY);
parseAll(parser, buffer);

Assert.assertThat(_bad, Matchers.notNullValue());

@@ -314,7 +322,7 @@ public class HttpParserTest
"\r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler, 4096, HttpCompliance.RFC7230);
HttpParser parser = new HttpParser(handler, 4096, HttpCompliance.RFC7230_LEGACY);
parseAll(parser, buffer);

Assert.assertThat(_bad, Matchers.notNullValue());

@@ -350,34 +358,92 @@ public class HttpParserTest
}

@Test
public void testNoColonLegacy() throws Exception
public void testSpaceinNameCustom0() throws Exception
{
ByteBuffer buffer = BufferUtil.toBuffer(
"GET / HTTP/1.0\r\n" +
"Host: localhost\r\n" +
"Name\r\n" +
"Name with space: value\r\n" +
"Other: value\r\n" +
"\r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler,HttpCompliance.LEGACY);
HttpParser parser = new HttpParser(handler,HttpCompliance.CUSTOM0);
parseAll(parser, buffer);

Assert.assertThat(_bad, Matchers.containsString("Illegal character"));
Assert.assertThat(_complianceViolation,contains(HttpComplianceSection.NO_WS_AFTER_FIELD_NAME));
}

@Test
public void testNoColonCustom0() throws Exception
{
ByteBuffer buffer = BufferUtil.toBuffer(
"GET / HTTP/1.0\r\n" +
"Host: localhost\r\n" +
"Name \r\n" +
"Other: value\r\n" +
"\r\n");

HttpParser.RequestHandler handler = new Handler();
HttpParser parser = new HttpParser(handler,HttpCompliance.CUSTOM0);
parseAll(parser, buffer);

Assert.assertThat(_bad, Matchers.containsString("Illegal character"));
Assert.assertThat(_complianceViolation,contains(HttpComplianceSection.NO_WS_AFTER_FIELD_NAME));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTrailingSpacesInHeaderNameInCustom0Mode() throws Exception
|
||||
{
|
||||
ByteBuffer buffer = BufferUtil.toBuffer(
|
||||
"HTTP/1.1 204 No Content\r\n" +
|
||||
"Access-Control-Allow-Headers : Origin\r\n" +
|
||||
"Other: value\r\n" +
|
||||
"\r\n");
|
||||
|
||||
HttpParser.ResponseHandler handler = new Handler();
|
||||
HttpParser parser = new HttpParser(handler, -1, HttpCompliance.CUSTOM0);
|
||||
parseAll(parser, buffer);
|
||||
|
||||
Assert.assertTrue(_headerCompleted);
|
||||
Assert.assertTrue(_messageCompleted);
|
||||
Assert.assertEquals("GET", _methodOrVersion);
|
||||
Assert.assertEquals("/", _uriOrStatus);
|
||||
Assert.assertEquals("HTTP/1.0", _versionOrReason);
|
||||
Assert.assertEquals("Host", _hdr[0]);
|
||||
Assert.assertEquals("localhost", _val[0]);
|
||||
Assert.assertEquals("Name", _hdr[1]);
|
||||
Assert.assertEquals("", _val[1]);
|
||||
Assert.assertEquals("Other", _hdr[2]);
|
||||
Assert.assertEquals("value", _val[2]);
|
||||
Assert.assertEquals(2, _headers);
|
||||
Assert.assertThat(_complianceViolation, Matchers.containsString("No colon"));
|
||||
|
||||
Assert.assertEquals("HTTP/1.1", _methodOrVersion);
|
||||
Assert.assertEquals("204", _uriOrStatus);
|
||||
Assert.assertEquals("No Content", _versionOrReason);
|
||||
Assert.assertEquals(null, _content);
|
||||
|
||||
Assert.assertEquals(1, _headers);
|
||||
System.out.println(Arrays.asList(_hdr));
|
||||
System.out.println(Arrays.asList(_val));
|
||||
Assert.assertEquals("Access-Control-Allow-Headers", _hdr[0]);
|
||||
Assert.assertEquals("Origin", _val[0]);
|
||||
Assert.assertEquals("Other", _hdr[1]);
|
||||
Assert.assertEquals("value", _val[1]);
|
||||
|
||||
Assert.assertThat(_complianceViolation, contains(HttpComplianceSection.NO_WS_AFTER_FIELD_NAME));
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testTrailingSpacesInHeaderNameNoCustom0() throws Exception
|
||||
{
|
||||
ByteBuffer buffer = BufferUtil.toBuffer(
|
||||
"HTTP/1.1 204 No Content\r\n" +
|
||||
"Access-Control-Allow-Headers : Origin\r\n" +
|
||||
"Other: value\r\n" +
|
||||
"\r\n");
|
||||
|
||||
HttpParser.ResponseHandler handler = new Handler();
|
||||
HttpParser parser = new HttpParser(handler);
|
||||
parseAll(parser, buffer);
|
||||
|
||||
Assert.assertEquals("HTTP/1.1", _methodOrVersion);
|
||||
Assert.assertEquals("204", _uriOrStatus);
|
||||
Assert.assertEquals("No Content", _versionOrReason);
|
||||
Assert.assertThat(_bad, Matchers.containsString("Illegal character 0x20"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNoColon7230() throws Exception
|
||||
{
|
||||
|
@ -388,10 +454,10 @@ public class HttpParserTest
|
|||
"\r\n");
|
||||
|
||||
HttpParser.RequestHandler handler = new Handler();
|
||||
HttpParser parser = new HttpParser(handler,HttpCompliance.RFC7230);
|
||||
HttpParser parser = new HttpParser(handler,HttpCompliance.RFC7230_LEGACY);
|
||||
parseAll(parser, buffer);
|
||||
Assert.assertThat(_bad, Matchers.containsString("Illegal character"));
|
||||
Assert.assertNull(_complianceViolation);
|
||||
Assert.assertThat(_complianceViolation,Matchers.empty());
|
||||
}
|
||||
|
||||
|
||||
|
@ -682,11 +748,11 @@ public class HttpParserTest
|
|||
"Connection: close\r\n" +
|
||||
"\r\n");
|
||||
HttpParser.RequestHandler handler = new Handler();
|
||||
HttpParser parser = new HttpParser(handler, -1, HttpCompliance.RFC7230);
|
||||
HttpParser parser = new HttpParser(handler, -1, HttpCompliance.RFC7230_LEGACY);
|
||||
parseAll(parser, buffer);
|
||||
Assert.assertNull(_bad);
|
||||
Assert.assertEquals("GET", _methodOrVersion);
|
||||
Assert.assertThat(_complianceViolation, Matchers.containsString("case insensitive method gEt"));
|
||||
Assert.assertThat(_complianceViolation, contains(HttpComplianceSection.METHOD_CASE_SENSITIVE));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -702,7 +768,7 @@ public class HttpParserTest
|
|||
parseAll(parser, buffer);
|
||||
Assert.assertNull(_bad);
|
||||
Assert.assertEquals("gEt", _methodOrVersion);
|
||||
Assert.assertNull(_complianceViolation);
|
||||
Assert.assertThat(_complianceViolation,Matchers.empty());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -714,7 +780,7 @@ public class HttpParserTest
|
|||
"cOnNeCtIoN: ClOsE\r\n" +
|
||||
"\r\n");
|
||||
HttpParser.RequestHandler handler = new Handler();
|
||||
HttpParser parser = new HttpParser(handler, -1, HttpCompliance.RFC7230);
|
||||
HttpParser parser = new HttpParser(handler, -1, HttpCompliance.RFC7230_LEGACY);
|
||||
parseAll(parser, buffer);
|
||||
Assert.assertNull(_bad);
|
||||
Assert.assertEquals("GET", _methodOrVersion);
|
||||
|
@ -725,7 +791,7 @@ public class HttpParserTest
|
|||
Assert.assertEquals("Connection", _hdr[1]);
|
||||
Assert.assertEquals("close", _val[1]);
|
||||
Assert.assertEquals(1, _headers);
|
||||
Assert.assertNull(_complianceViolation);
|
||||
Assert.assertThat(_complianceViolation,Matchers.empty());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -748,7 +814,7 @@ public class HttpParserTest
|
|||
Assert.assertEquals("cOnNeCtIoN", _hdr[1]);
|
||||
Assert.assertEquals("ClOsE", _val[1]);
|
||||
Assert.assertEquals(1, _headers);
|
||||
Assert.assertThat(_complianceViolation, Matchers.containsString("case sensitive"));
|
||||
Assert.assertThat(_complianceViolation, contains(HttpComplianceSection.FIELD_NAME_CASE_INSENSITIVE,HttpComplianceSection.FIELD_NAME_CASE_INSENSITIVE,HttpComplianceSection.CASE_INSENSITIVE_FIELD_VALUE_CACHE));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -1366,10 +1432,10 @@ public class HttpParserTest
|
|||
|
||||
@Test
|
||||
public void testResponseReasonIso8859_1() throws Exception
|
||||
{
|
||||
{
|
||||
ByteBuffer buffer = BufferUtil.toBuffer(
|
||||
"HTTP/1.1 302 déplacé temporairement\r\n"
|
||||
+ "Content-Length: 0\r\n"
|
||||
+ "Content-Length: 0\r\n"
|
||||
+ "\r\n",StandardCharsets.ISO_8859_1);
|
||||
|
||||
HttpParser.ResponseHandler handler = new Handler();
|
||||
|
@ -2041,7 +2107,7 @@ public class HttpParserTest
|
|||
_headers = 0;
|
||||
_headerCompleted = false;
|
||||
_messageCompleted = false;
|
||||
_complianceViolation = null;
|
||||
_complianceViolation.clear();
|
||||
}
|
||||
|
||||
private String _host;
|
||||
|
@ -2059,8 +2125,8 @@ public class HttpParserTest
|
|||
private boolean _early;
|
||||
private boolean _headerCompleted;
|
||||
private boolean _messageCompleted;
|
||||
private String _complianceViolation;
|
||||
|
||||
private final List<HttpComplianceSection> _complianceViolation = new ArrayList<>();
|
||||
|
||||
private class Handler implements HttpParser.RequestHandler, HttpParser.ResponseHandler, HttpParser.ComplianceHandler
|
||||
{
|
||||
@Override
|
||||
|
@ -2147,8 +2213,9 @@ public class HttpParserTest
|
|||
_methodOrVersion = version.asString();
|
||||
_uriOrStatus = Integer.toString(status);
|
||||
_versionOrReason = reason;
|
||||
_hdr = new String[9];
|
||||
_val = new String[9];
|
||||
_headers = -1;
|
||||
_hdr = new String[10];
|
||||
_val = new String[10];
|
||||
_messageCompleted = false;
|
||||
_headerCompleted = false;
|
||||
return false;
|
||||
|
@ -2167,9 +2234,9 @@ public class HttpParserTest
|
|||
}
|
||||
|
||||
@Override
|
||||
public void onComplianceViolation(HttpCompliance compliance, HttpCompliance required, String reason)
|
||||
public void onComplianceViolation(HttpCompliance compliance, HttpComplianceSection violation, String reason)
|
||||
{
|
||||
_complianceViolation=reason;
|
||||
_complianceViolation.add(violation);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -90,11 +90,9 @@ public class HttpChannelOverHTTP2 extends HttpChannel
}

@Override
public void send()
public void send(HttpExchange exchange)
{
HttpExchange exchange = getHttpExchange();
if (exchange != null)
sender.send(exchange);
sender.send(exchange);
}

@Override

@ -66,13 +66,13 @@ public class HttpConnectionOverHTTP2 extends HttpConnection implements Sweeper.S
normalizeRequest(request);

// One connection maps to N channels, so one channel for each exchange.
HttpChannelOverHTTP2 channel = provideHttpChannel();
HttpChannelOverHTTP2 channel = acquireHttpChannel();
activeChannels.add(channel);

return send(channel, exchange);
}

protected HttpChannelOverHTTP2 provideHttpChannel()
protected HttpChannelOverHTTP2 acquireHttpChannel()
{
HttpChannelOverHTTP2 channel = idleChannels.poll();
if (channel == null)

@ -92,10 +92,16 @@ public class HttpConnectionOverHTTP2 extends HttpConnection implements Sweeper.S
{
channel.setStream(null);
// Recycle only non-failed channels.
if (!channel.isFailed())
if (channel.isFailed())
channel.destroy();
else
idleChannels.offer(channel);
getHttpDestination().release(this);
}
else
{
channel.destroy();
}
}

@Override

@ -122,6 +128,13 @@ public class HttpConnectionOverHTTP2 extends HttpConnection implements Sweeper.S
abort(failure);

session.close(ErrorCode.NO_ERROR.code, failure.getMessage(), Callback.NOOP);

HttpChannel channel = idleChannels.poll();
while (channel!=null)
{
channel.destroy();
channel = idleChannels.poll();
}
}
}

@ -140,7 +153,12 @@ public class HttpConnectionOverHTTP2 extends HttpConnection implements Sweeper.S
exchange.getRequest().abort(failure);
}
activeChannels.clear();
idleChannels.clear();
HttpChannel channel = idleChannels.poll();
while (channel!=null)
{
channel.destroy();
channel = idleChannels.poll();
}
}

@Override

@ -129,7 +129,7 @@ public class HttpReceiverOverHTTP2 extends HttpReceiver implements Stream.Listen
Response.CompleteListener listener = pushListener.apply(request, pushRequest);
if (listener != null)
{
HttpChannelOverHTTP2 pushChannel = getHttpChannel().getHttpConnection().provideHttpChannel();
HttpChannelOverHTTP2 pushChannel = getHttpChannel().getHttpConnection().acquireHttpChannel();
List<Response.ResponseListener> listeners = Collections.singletonList(listener);
HttpExchange pushExchange = new HttpExchange(getHttpDestination(), pushRequest, listeners);
pushChannel.associate(pushExchange);

@ -33,6 +33,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

@ -388,7 +389,7 @@ public class HttpClientTransportOverHTTP2Test extends AbstractTest
.send();
Assert.fail();
}
catch (ExecutionException e)
catch (TimeoutException e)
{
// Expected.
}

@ -424,7 +425,7 @@ public class HttpClientTransportOverHTTP2Test extends AbstractTest
.send();
Assert.fail();
}
catch (ExecutionException e)
catch (TimeoutException e)
{
// Expected.
}
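For context, a minimal sketch of the client-side behaviour these two test hunks now expect. It is not part of this commit; the host, port and one-second value are placeholders, and it assumes a server that does not answer in time:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.eclipse.jetty.client.HttpClient;

public class TotalTimeoutSketch
{
    public static void main(String[] args) throws Exception
    {
        HttpClient client = new HttpClient();
        client.start();
        try
        {
            // Total timeout for the whole exchange; after this change an
            // expired total timeout surfaces as a TimeoutException rather
            // than an ExecutionException wrapping it.
            client.newRequest("localhost", 8080)
                .timeout(1, TimeUnit.SECONDS)
                .send();
        }
        catch (TimeoutException expected)
        {
            // Expected when no response arrives within the timeout.
        }
        finally
        {
            client.stop();
        }
    }
}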
@ -25,6 +25,7 @@ import java.nio.channels.ByteChannel;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;

import org.eclipse.jetty.util.BufferUtil;
import org.eclipse.jetty.util.log.Log;

@ -41,7 +42,6 @@ public abstract class ChannelEndPoint extends AbstractEndPoint implements Manage
{
private static final Logger LOG = Log.getLogger(ChannelEndPoint.class);

private final Locker _locker = new Locker();
private final ByteChannel _channel;
private final GatheringByteChannel _gather;
protected final ManagedSelector _selector;

@ -95,16 +95,10 @@ public abstract class ChannelEndPoint extends AbstractEndPoint implements Manage
}
}

private final Runnable _runUpdateKey = new RunnableTask("runUpdateKey")
private final ManagedSelector.SelectorUpdate _updateKeyAction = new ManagedSelector.SelectorUpdate()
{
@Override
public InvocationType getInvocationType()
{
return InvocationType.NON_BLOCKING;
}

@Override
public void run()
public void update(Selector selector)
{
updateKey();
}

@ -336,7 +330,7 @@ public abstract class ChannelEndPoint extends AbstractEndPoint implements Manage
int readyOps = _key.readyOps();
int oldInterestOps;
int newInterestOps;
try (Locker.Lock lock = _locker.lock())
synchronized(this)
{
_updatePending = true;
// Remove the readyOps, that here can only be OP_READ or OP_WRITE (or both).

@ -376,7 +370,7 @@ public abstract class ChannelEndPoint extends AbstractEndPoint implements Manage
{
int oldInterestOps;
int newInterestOps;
try (Locker.Lock lock = _locker.lock())
synchronized(this)
{
_updatePending = false;
oldInterestOps = _currentInterestOps;

@ -413,7 +407,7 @@ public abstract class ChannelEndPoint extends AbstractEndPoint implements Manage
int oldInterestOps;
int newInterestOps;
boolean pending;
try (Locker.Lock lock = _locker.lock())
synchronized(this)
{
pending = _updatePending;
oldInterestOps = _desiredInterestOps;

@ -426,7 +420,7 @@ public abstract class ChannelEndPoint extends AbstractEndPoint implements Manage
LOG.debug("changeInterests p={} {}->{} for {}", pending, oldInterestOps, newInterestOps, this);

if (!pending && _selector!=null)
_selector.submit(_runUpdateKey);
_selector.submit(_updateKeyAction);
}

@Override
@ -0,0 +1,279 @@
//
// ========================================================================
// Copyright (c) 1995-2018 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//

package org.eclipse.jetty.io;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import org.eclipse.jetty.util.component.Destroyable;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.thread.Scheduler;

import static java.lang.Long.MAX_VALUE;

/**
* <p>An abstract implementation of a timeout.</p>
* <p>Subclasses should implement {@link #onTimeoutExpired()}.</p>
* <p>This implementation is optimised assuming that the timeout
* will mostly be cancelled and then reused with a similar value.</p>
*/
public abstract class CyclicTimeout implements Destroyable
{
private static final Logger LOG = Log.getLogger(CyclicTimeout.class);
private static final Timeout NOT_SET = new Timeout(MAX_VALUE, null);
private static final Scheduler.Task DESTROYED = () -> false;

/* The underlying scheduler to use */
private final Scheduler _scheduler;
/* Reference to the current Timeout and chain of Wakeup */
private final AtomicReference<Timeout> _timeout = new AtomicReference<>(NOT_SET);

/**
* @param scheduler A scheduler used to schedule wakeups
*/
public CyclicTimeout(Scheduler scheduler)
{
_scheduler = scheduler;
}

public Scheduler getScheduler()
{
return _scheduler;
}

/**
* Schedules a timeout, even if already set, cancelled or expired.
*
* @param delay The period of time before the timeout expires.
* @param units The unit of time of the period.
* @return true if the timer was already set.
*/
public boolean schedule(long delay, TimeUnit units)
{
long now = System.nanoTime();
long new_timeout_at = now + units.toNanos(delay);

boolean result;
Wakeup new_wakeup;
while (true)
{
Timeout timeout = _timeout.get();

new_wakeup = null;
result = timeout._at != MAX_VALUE;

// Is the current wakeup good to use? ie before our timeout time?
Wakeup wakeup = timeout._wakeup;
if (wakeup == null || wakeup._at > new_timeout_at)
// No, we need an earlier wakeup.
wakeup = new_wakeup = new Wakeup(new_timeout_at, wakeup);

if (_timeout.compareAndSet(timeout, new Timeout(new_timeout_at, wakeup)))
{
if (LOG.isDebugEnabled())
LOG.debug("Installed timeout in {} ms, waking up in {} ms",
units.toMillis(delay),
TimeUnit.NANOSECONDS.toMillis(wakeup._at - now));
break;
}
}

// If we created a new wakeup, we need to actually schedule it.
// Any wakeup that is created and discarded by the failed CAS will not be
// in the wakeup chain, will not have a scheduler task set and will be GC'd.
if (new_wakeup != null)
new_wakeup.schedule(now);

return result;
}

/**
* <p>Cancels this CyclicTimeout so that it won't expire.</p>
* <p>After being cancelled, this CyclicTimeout can be scheduled again.</p>
*
* @return true if this CyclicTimeout was scheduled to expire
* @see #destroy()
*/
public boolean cancel()
{
boolean result;
Timeout timeout;
Timeout new_timeout;
while (true)
{
timeout = _timeout.get();
result = timeout._at != MAX_VALUE;
Wakeup wakeup = timeout._wakeup;
new_timeout = wakeup == null ? NOT_SET : new Timeout(MAX_VALUE, wakeup);
if (_timeout.compareAndSet(timeout, new_timeout))
break;
}
return result;
}

/**
* <p>Invoked when the timeout expires.</p>
*/
public abstract void onTimeoutExpired();

/**
* <p>Destroys this CyclicTimeout.</p>
* <p>After being destroyed, this CyclicTimeout is not used anymore.</p>
*/
public void destroy()
{
Timeout timeout = _timeout.getAndSet(NOT_SET);
Wakeup wakeup = timeout == null ? null : timeout._wakeup;
while (wakeup != null)
{
wakeup.destroy();
wakeup = wakeup._next;
}
}

/**
* A timeout time with a link to a Wakeup chain.
*/
private static class Timeout
{
private final long _at;
private final Wakeup _wakeup;

private Timeout(long timeoutAt, Wakeup wakeup)
{
_at = timeoutAt;
_wakeup = wakeup;
}

@Override
public String toString()
{
return String.format("%s@%x:%d,%s", getClass().getSimpleName(), hashCode(), _at, _wakeup);
}
}

/**
* A Wakeup chain of real scheduler tasks.
*/
private class Wakeup implements Runnable
{
private final AtomicReference<Scheduler.Task> _task = new AtomicReference<>();
private final long _at;
private final Wakeup _next;

private Wakeup(long wakeupAt, Wakeup next)
{
_at = wakeupAt;
_next = next;
}

private void schedule(long now)
{
_task.compareAndSet(null, _scheduler.schedule(this, _at - now, TimeUnit.NANOSECONDS));
}

private void destroy()
{
Scheduler.Task task = _task.getAndSet(DESTROYED);
if (task != null)
task.cancel();
}

@Override
public void run()
{
long now;
Wakeup new_wakeup;
boolean has_expired;

while (true)
{
Timeout timeout = _timeout.get();

// We must look for ourselves in the current wakeup list.
// If we find ourselves, then we act and we use our tail for any new
// wakeup list, effectively removing any wakeup before us in the list (and making them no-ops).
// If we don't find ourselves, then a wakeup that should have expired after us has already run
// and removed us from the list, so we become a noop.

Wakeup wakeup = timeout._wakeup;
while (wakeup != null)
{
if (wakeup == this)
break;
// Not us, so look at next wakeup in the list.
wakeup = wakeup._next;
}
if (wakeup == null)
// Not found, we become a noop.
return;

now = System.nanoTime();
new_wakeup = null;
has_expired = false;
Timeout new_timeout;

// We are in the wakeup list! So we have to act and we know our
// tail has not expired (else it would have removed us from the list).
// Remove ourselves (and any prior Wakeup) from the wakeup list.
wakeup = wakeup._next;

if (timeout._at <= now)
{
// We have timed out!
has_expired = true;
new_timeout = wakeup == null ? NOT_SET : new Timeout(MAX_VALUE, wakeup);
}
else if (timeout._at != MAX_VALUE)
{
// We have not timed out, but we are set to!
// Is the current wakeup good to use? ie before our timeout time?
if (wakeup == null || wakeup._at >= timeout._at)
// No, we need an earlier wakeup.
wakeup = new_wakeup = new Wakeup(timeout._at, wakeup);
new_timeout = new Timeout(timeout._at, wakeup);
}
else
{
// We don't timeout, preserve scheduled chain.
new_timeout = wakeup == null ? NOT_SET : new Timeout(MAX_VALUE, wakeup);
}

// Loop until we succeed in changing state or we are a noop!
if (_timeout.compareAndSet(timeout, new_timeout))
break;
}

// If we created a new wakeup, we need to actually schedule it.
if (new_wakeup != null)
new_wakeup.schedule(now);

// If we expired, then do the callback.
if (has_expired)
onTimeoutExpired();
}

@Override
public String toString()
{
return String.format("%s@%x:%d->%s", getClass().getSimpleName(), hashCode(), _at, _next);
}
}
}
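A minimal usage sketch of the new CyclicTimeout (not part of this commit). The scheduler and the 1000 ms value mirror the accompanying CyclicTimeoutTest; the println is a placeholder for real expiry handling:

import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.io.CyclicTimeout;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;

public class CyclicTimeoutSketch
{
    public static void main(String[] args) throws Exception
    {
        ScheduledExecutorScheduler scheduler = new ScheduledExecutorScheduler();
        scheduler.start();

        CyclicTimeout timeout = new CyclicTimeout(scheduler)
        {
            @Override
            public void onTimeoutExpired()
            {
                System.err.println("no activity for 1000 ms");
            }
        };

        // Arm the timeout, then cancel and re-arm it on each activity;
        // the class is optimised for exactly this cancel-and-reuse pattern.
        timeout.schedule(1000, TimeUnit.MILLISECONDS);
        timeout.cancel();
        timeout.schedule(1000, TimeUnit.MILLISECONDS);

        // Once the timeout is no longer needed:
        timeout.destroy();
        scheduler.stop();
    }
}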
@ -33,6 +33,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

@ -40,6 +41,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

import org.eclipse.jetty.util.component.ContainerLifeCycle;

@ -48,8 +50,6 @@ import org.eclipse.jetty.util.component.DumpableCollection;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.thread.ExecutionStrategy;
import org.eclipse.jetty.util.thread.Invocable;
import org.eclipse.jetty.util.thread.Locker;
import org.eclipse.jetty.util.thread.ReservedThreadExecutor;
import org.eclipse.jetty.util.thread.Scheduler;
import org.eclipse.jetty.util.thread.strategy.EatWhatYouKill;

@ -64,14 +64,14 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
{
private static final Logger LOG = Log.getLogger(ManagedSelector.class);

private final Locker _locker = new Locker();
private final AtomicBoolean _started = new AtomicBoolean(false);
private boolean _selecting = false;
private final Deque<Runnable> _actions = new ArrayDeque<>();
private final SelectorManager _selectorManager;
private final int _id;
private final ExecutionStrategy _strategy;
private Selector _selector;
private int _actionCount;
private Deque<SelectorUpdate> _updates = new ArrayDeque<>();
private Deque<SelectorUpdate> _updateable = new ArrayDeque<>();

public ManagedSelector(SelectorManager selectorManager, int id)
{

@ -102,6 +102,9 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
// The normal strategy obtains the produced task, schedules
// a new thread to produce more, runs the task and then exits.
_selectorManager.execute(_strategy::produce);

// Set started only if we really are started
submit(s->_started.set(true));
}

public int size()

@ -114,31 +117,38 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable

@Override
protected void doStop() throws Exception
{
if (LOG.isDebugEnabled())
LOG.debug("Stopping {}", this);
CloseEndPoints close_endps = new CloseEndPoints();
submit(close_endps);
close_endps.await(getStopTimeout());
CloseSelector close_selector = new CloseSelector();
submit(close_selector);
close_selector.await(getStopTimeout());
{
// doStop might be called for a failed managedSelector,
// We do not want to wait twice, so we only stop once for each start
if (_started.compareAndSet(true,false))
{
// Close connections, but only wait a single selector cycle for it to take effect
CloseConnections close_connections = new CloseConnections();
submit(close_connections);
close_connections._complete.await();

super.doStop();

if (LOG.isDebugEnabled())
LOG.debug("Stopped {}", this);
// Wait for any remaining endpoints to be closed and the selector to be stopped
StopSelector stop_selector = new StopSelector();
submit(stop_selector);
stop_selector._stopped.await();
}

super.doStop();
}

public void submit(Runnable change)
/**
* Submit an {@link SelectorUpdate} to be acted on between calls to {@link Selector#select()}
* @param action
*/
public void submit(SelectorUpdate action)
{
if (LOG.isDebugEnabled())
LOG.debug("Queued change {} on {}", change, this);
LOG.debug("Queued change {} on {}", action, this);

Selector selector = null;
try (Locker.Lock lock = _locker.lock())
synchronized(ManagedSelector.this)
{
_actions.offer(change);
_updates.offer(action);

if (_selecting)
{

@ -178,15 +188,24 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
if (connect.timeout.cancel())
{
key.interestOps(0);
execute(new CreateEndPoint(channel, key)
execute(new Runnable()
{
@Override
protected void failed(Throwable failure)
public void run()
{
super.failed(failure);
connect.failed(failure);
try
{
createEndPoint(channel,key);
}
catch(Throwable failure)
{
closeNoExceptions(channel);
LOG.warn(String.valueOf(failure));
LOG.debug(failure);
connect.failed(failure);
}
}
});
});
}
else
{

@ -204,7 +223,7 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}
}

private void closeNoExceptions(Closeable closeable)
private static void closeNoExceptions(Closeable closeable)
{
try
{

@ -237,9 +256,9 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable

private int getActionSize()
{
try (Locker.Lock lock = _locker.lock())
synchronized(ManagedSelector.this)
{
return _actions.size();
return _updates.size();
}
}

@ -248,16 +267,16 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
{
Selector selector = _selector;
List<String> keys = null;
List<Runnable> actions = null;
List<SelectorUpdate> actions = null;
if (selector != null && selector.isOpen())
{
DumpKeys dump = new DumpKeys();
String actionsAt;
try (Locker.Lock lock = _locker.lock())
synchronized(ManagedSelector.this)
{
actionsAt = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now());
actions = new ArrayList<>(_actions);
_actions.addFirst(dump);
actions = new ArrayList<>(_updates);
_updates.addFirst(dump);
_selecting = false;
}
selector.wakeup();

@ -321,10 +340,8 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
if (task != null)
return task;

Runnable action = nextAction();
if (action != null)
return action;

processUpdates();

updateKeys();

if (!select())

@ -332,61 +349,49 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}
}

private Runnable nextAction()
private void processUpdates()
{
Selector selector = null;
Runnable action = null;
try (Locker.Lock lock = _locker.lock())
synchronized(ManagedSelector.this)
{
// It is important to avoid live-lock (busy blocking) here. If too many actions
// are submitted, this can indefinitely defer selection happening. Similarly if
// we give too much priority to selection, it may prevent actions from being run.
// The solution implemented here is to only process the number of actions that were
// originally in the action queue before attempting a select

if (_actionCount==0)
{
// Calculate how many actions we are prepared to handle before selection
_actionCount = _actions.size();
if (_actionCount>0)
action = _actions.poll();
else
_selecting = true;
}
else if (_actionCount==1)
Deque<SelectorUpdate> updates = _updates;
_updates = _updateable;
_updateable = updates;
}

if (LOG.isDebugEnabled())
LOG.debug("updateable {}",_updateable.size());

for (SelectorUpdate update : _updateable)
{
if (_selector==null)
break;
try
{
_actionCount = 0;

if (LOG.isDebugEnabled())
LOG.debug("Forcing selection, actions={}",_actions.size());

if (_actions.size()==0)
{
// This was the last action, so select normally
_selecting = true;
}
else
{
// there are still more actions to handle, so
// immediately wake up (as if remaining action were just added).
selector = _selector;
_selecting = false;
}
LOG.debug("update {}",update);
update.update(_selector);
}
else
catch(Throwable th)
{
_actionCount--;
action = _actions.poll();
LOG.warn(th);
}
}
_updateable.clear();

Selector selector;
int actions;
synchronized(ManagedSelector.this)
{
actions = _updates.size();
_selecting = actions==0;
selector = _selecting?null:_selector;
}

if (LOG.isDebugEnabled())
LOG.debug("action={} wakeup={}",action,selector!=null);
LOG.debug("actions {}",actions);

if (selector != null)
selector.wakeup();

return action;
selector.wakeup();
}

private boolean select()

@ -403,11 +408,11 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
LOG.debug("Selector {} woken up from select, {}/{} selected", selector, selected, selector.keys().size());

int actions;
try (Locker.Lock lock = _locker.lock())
synchronized(ManagedSelector.this)
{
// finished selecting
_selecting = false;
actions = _actions.size();
actions = _updates.size();
}

_keys = selector.selectedKeys();

@ -505,34 +510,38 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
return String.format("%s@%x", getClass().getSimpleName(), hashCode());
}
}

/**
* A selector update to be done when the selector has been woken.
*/
public interface SelectorUpdate
{
public void update(Selector selector);
}

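As a rough illustration of the new contract (not part of this commit), a caller holding a ManagedSelector reference, here the hypothetical variable managedSelector, queues work onto the selector thread with submit(); the update runs between select() calls, just like the anonymous _updateKeyAction in ChannelEndPoint above:

// SelectorUpdate has a single method, so a lambda works too,
// matching the submit(s->_started.set(true)) call in doStart().
managedSelector.submit(selector ->
{
    // Runs on the selector thread; safe to touch SelectionKeys here.
});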
private class DumpKeys extends Invocable.NonBlocking
private static class DumpKeys implements SelectorUpdate
{
private CountDownLatch latch = new CountDownLatch(1);
private List<String> keys;

@Override
public void run()
public void update(Selector selector)
{
Selector selector = _selector;
if (selector != null && selector.isOpen())
{
Set<SelectionKey> selector_keys = selector.keys();
List<String> list = new ArrayList<>(selector_keys.size()+1);
list.add(selector + " keys=" + selector_keys.size());
for (SelectionKey key : selector_keys)
Set<SelectionKey> selector_keys = selector.keys();
List<String> list = new ArrayList<>(selector_keys.size()+1);
list.add(selector + " keys=" + selector_keys.size());
for (SelectionKey key : selector_keys)
{
try
{
try
{
list.add(String.format("SelectionKey@%x{i=%d}->%s", key.hashCode(), key.interestOps(), key.attachment()));
}
catch (Throwable x)
{
list.add(String.format("SelectionKey@%x[%s]->%s", key.hashCode(), x, key.attachment()));
}
list.add(String.format("SelectionKey@%x{i=%d}->%s", key.hashCode(), key.interestOps(), key.attachment()));
}
catch (Throwable x)
{
list.add(String.format("SelectionKey@%x[%s]->%s", key.hashCode(), x, key.attachment()));
}
keys = list;
}
keys = list;

latch.countDown();
}

@ -551,7 +560,7 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}
}

class Acceptor extends Invocable.NonBlocking implements Selectable, Closeable
class Acceptor implements SelectorUpdate, Selectable, Closeable
{
private final SelectableChannel _channel;
private SelectionKey _key;

@ -562,13 +571,13 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}

@Override
public void run()
public void update(Selector selector)
{
try
{
if (_key==null)
{
_key = _channel.register(_selector, SelectionKey.OP_ACCEPT, this);
_key = _channel.register(selector, SelectionKey.OP_ACCEPT, this);
}

if (LOG.isDebugEnabled())

@ -620,10 +629,11 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}
}

class Accept extends Invocable.NonBlocking implements Closeable
class Accept implements SelectorUpdate, Runnable, Closeable
{
private final SelectableChannel channel;
private final Object attachment;
private SelectionKey key;

Accept(SelectableChannel channel, Object attachment)
{

@ -639,12 +649,12 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}

@Override
public void run()
public void update(Selector selector)
{
try
{
final SelectionKey key = channel.register(_selector, 0, attachment);
execute(new CreateEndPoint(channel, key));
key = channel.register(selector, 0, attachment);
execute(this);
}
catch (Throwable x)
{

@ -652,18 +662,6 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
LOG.debug(x);
}
}
}

private class CreateEndPoint implements Runnable, Closeable
{
private final SelectableChannel channel;
private final SelectionKey key;

public CreateEndPoint(SelectableChannel channel, SelectionKey key)
{
this.channel = channel;
this.key = key;
}

@Override
public void run()

@ -679,13 +677,6 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}
}

@Override
public void close()
{
LOG.debug("closed creation of {}", channel);
closeNoExceptions(channel);
}

protected void failed(Throwable failure)
{
closeNoExceptions(channel);

@ -694,7 +685,8 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}
}

class Connect extends Invocable.NonBlocking

class Connect implements SelectorUpdate, Runnable
{
private final AtomicBoolean failed = new AtomicBoolean();
private final SelectableChannel channel;

@ -705,23 +697,34 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
{
this.channel = channel;
this.attachment = attachment;
this.timeout = ManagedSelector.this._selectorManager.getScheduler().schedule(new ConnectTimeout(this), ManagedSelector.this._selectorManager.getConnectTimeout(), TimeUnit.MILLISECONDS);
this.timeout = ManagedSelector.this._selectorManager.getScheduler().schedule(this, ManagedSelector.this._selectorManager.getConnectTimeout(), TimeUnit.MILLISECONDS);
}

@Override
public void run()
public void update(Selector selector)
{
try
{
channel.register(_selector, SelectionKey.OP_CONNECT, this);
channel.register(selector, SelectionKey.OP_CONNECT, this);
}
catch (Throwable x)
{
failed(x);
}
}

@Override
public void run()
{
if (_selectorManager.isConnectionPending(channel))
{
if (LOG.isDebugEnabled())
LOG.debug("Channel {} timed out while connecting, closing it", channel);
failed(new SocketTimeoutException("Connect Timeout"));
}
}

private void failed(Throwable failure)
public void failed(Throwable failure)
{
if (failed.compareAndSet(false, true))
{

@ -732,120 +735,97 @@ public class ManagedSelector extends ContainerLifeCycle implements Dumpable
}
}

private class ConnectTimeout extends Invocable.NonBlocking
private class CloseConnections implements SelectorUpdate
{
private final Connect connect;
final Set<Closeable> _closed;
final CountDownLatch _noEndPoints = new CountDownLatch(1);
final CountDownLatch _complete = new CountDownLatch(1);

private ConnectTimeout(Connect connect)
public CloseConnections()
{
this.connect = connect;
this(null);
}

public CloseConnections(Set<Closeable> closed)
{
_closed = closed;
}

@Override
public void run()
{
SelectableChannel channel = connect.channel;
if (_selectorManager.isConnectionPending(channel))
public void update(Selector selector)
{
if (LOG.isDebugEnabled())
LOG.debug("Closing {} connections on {}", selector.keys().size(), ManagedSelector.this);
boolean zero = true;
for (SelectionKey key : selector.keys())
{
if (LOG.isDebugEnabled())
LOG.debug("Channel {} timed out while connecting, closing it", channel);
connect.failed(new SocketTimeoutException("Connect Timeout"));
if (key.isValid())
{
Closeable closeable = null;
Object attachment = key.attachment();
if (attachment instanceof EndPoint)
{
EndPoint endp = (EndPoint)attachment;
if (!endp.isOutputShutdown())
zero = false;
Connection connection = endp.getConnection();
if (connection != null)
closeable = connection;
else
closeable = endp;
}

if (closeable!=null)
{
if (_closed==null)
{
closeNoExceptions(closeable);
}
else if (!_closed.contains(closeable))
{
_closed.add(closeable);
closeNoExceptions(closeable);
}
}
}
}

if (zero)
_noEndPoints.countDown();
_complete.countDown();
}
}

private class CloseEndPoints extends Invocable.NonBlocking

private class StopSelector implements SelectorUpdate
{
private final CountDownLatch _latch = new CountDownLatch(1);
private CountDownLatch _allClosed;

CountDownLatch _stopped = new CountDownLatch(1);
boolean _forcedEndPointClose = false;

@Override
public void run()
public void update(Selector selector)
{
List<EndPoint> end_points = new ArrayList<>();
for (SelectionKey key : _selector.keys())
for (SelectionKey key : selector.keys())
{
if (key.isValid())
{
Object attachment = key.attachment();
if (attachment instanceof EndPoint)
end_points.add((EndPoint)attachment);
{
EndPoint endp = (EndPoint)attachment;
if (!endp.isOutputShutdown())
_forcedEndPointClose = true;
closeNoExceptions((EndPoint)attachment);
}
}
}

int size = end_points.size();
if (LOG.isDebugEnabled())
LOG.debug("Closing {} endPoints on {}", size, ManagedSelector.this);

_allClosed = new CountDownLatch(size);
_latch.countDown();

for (EndPoint endp : end_points)
submit(new EndPointCloser(endp, _allClosed));

if (LOG.isDebugEnabled())
LOG.debug("Closed {} endPoints on {}", size, ManagedSelector.this);
}

public boolean await(long timeout)
{
try
{
return _latch.await(timeout, TimeUnit.MILLISECONDS) &&
_allClosed.await(timeout, TimeUnit.MILLISECONDS);
}
catch (InterruptedException x)
{
return false;
}
}
}

private class EndPointCloser implements Runnable
{
private final EndPoint _endPoint;
private final CountDownLatch _latch;

private EndPointCloser(EndPoint endPoint, CountDownLatch latch)
{
_endPoint = endPoint;
_latch = latch;
}

@Override
public void run()
{
closeNoExceptions(_endPoint.getConnection());
_latch.countDown();
}
}

private class CloseSelector extends Invocable.NonBlocking
{
private CountDownLatch _latch = new CountDownLatch(1);

@Override
public void run()
{
Selector selector = _selector;

_selector = null;
closeNoExceptions(selector);
_latch.countDown();
}

public boolean await(long timeout)
{
try
{
return _latch.await(timeout, TimeUnit.MILLISECONDS);
}
catch (InterruptedException x)
{
return false;
}
_stopped.countDown();
}
}

private class DestroyEndPoint implements Runnable, Closeable
{
private final EndPoint endPoint;
@ -28,6 +28,7 @@ import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Arrays;
import java.util.concurrent.Executor;

import org.eclipse.jetty.util.annotation.ManagedAttribute;

@ -62,6 +63,7 @@ public abstract class SelectorManager extends ContainerLifeCycle implements Dump
private long _selectorIndex;
private int _reservedThreads = -1;
private ThreadPoolBudget.Lease _lease;
private ReservedThreadExecutor _reservedThreadExecutor;

private static int defaultSelectors(Executor executor)
{

@ -296,7 +298,8 @@ public abstract class SelectorManager extends ContainerLifeCycle implements Dump
@Override
protected void doStart() throws Exception
{
addBean(new ReservedThreadExecutor(getExecutor(),_reservedThreads,this),true);
_reservedThreadExecutor = new ReservedThreadExecutor(getExecutor(),_reservedThreads,this);
addBean(_reservedThreadExecutor,true);
_lease = ThreadPoolBudget.leaseFrom(getExecutor(), this, _selectors.length);
for (int i = 0; i < _selectors.length; i++)
{

@ -321,11 +324,25 @@ public abstract class SelectorManager extends ContainerLifeCycle implements Dump
@Override
protected void doStop() throws Exception
{
super.doStop();
for (ManagedSelector selector : _selectors)
removeBean(selector);
if (_lease != null)
_lease.close();
try
{
super.doStop();
}
finally
{
// Cleanup
for (ManagedSelector selector : _selectors)
{
if (selector!=null)
removeBean(selector);
}
Arrays.fill(_selectors,null);
if (_reservedThreadExecutor!=null)
removeBean(_reservedThreadExecutor);
_reservedThreadExecutor = null;
if (_lease != null)
_lease.close();
}
}

/**

@ -0,0 +1,169 @@
//
// ========================================================================
// Copyright (c) 1995-2018 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// and Apache License v2.0 which accompanies this distribution.
//
// The Eclipse Public License is available at
// http://www.eclipse.org/legal/epl-v10.html
//
// The Apache License v2.0 is available at
// http://www.opensource.org/licenses/apache2.0.php
//
// You may elect to redistribute this code under either of these licenses.
// ========================================================================
//

package org.eclipse.jetty.io;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.eclipse.jetty.toolchain.test.TestTracker;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;

public class CyclicTimeoutTest
{
@Rule
public TestTracker tracker = new TestTracker();

private volatile boolean _expired;
private ScheduledExecutorScheduler _timer = new ScheduledExecutorScheduler();
private CyclicTimeout _timeout;

@Before
public void before() throws Exception
{
_expired=false;
_timer.start();

_timeout=new CyclicTimeout(_timer)
{
@Override
public void onTimeoutExpired()
{
_expired = true;
}
};

_timeout.schedule(1000,TimeUnit.MILLISECONDS);
}

@After
public void after() throws Exception
{
_timeout.destroy();
_timer.stop();
}

@Test
public void testReschedule() throws Exception
{
for (int i=0;i<20;i++)
{
Thread.sleep(100);
Assert.assertTrue(_timeout.schedule(1000,TimeUnit.MILLISECONDS));
}
Assert.assertFalse(_expired);
}

@Test
public void testExpire() throws Exception
{
for (int i=0;i<5;i++)
{
Thread.sleep(100);
Assert.assertTrue(_timeout.schedule(1000,TimeUnit.MILLISECONDS));
}
Thread.sleep(1500);
Assert.assertTrue(_expired);
}

@Test
public void testCancel() throws Exception
{
for (int i=0;i<5;i++)
{
Thread.sleep(100);
Assert.assertTrue(_timeout.schedule(1000,TimeUnit.MILLISECONDS));
}
_timeout.cancel();
Thread.sleep(1500);
Assert.assertFalse(_expired);
}

@Test
public void testShorten() throws Exception
{
for (int i=0;i<5;i++)
{
Thread.sleep(100);
Assert.assertTrue(_timeout.schedule(1000,TimeUnit.MILLISECONDS));
}
Assert.assertTrue(_timeout.schedule(100,TimeUnit.MILLISECONDS));
Thread.sleep(400);
Assert.assertTrue(_expired);
}

@Test
public void testLengthen() throws Exception
{
for (int i=0;i<5;i++)
{
Thread.sleep(100);
Assert.assertTrue(_timeout.schedule(1000,TimeUnit.MILLISECONDS));
}
Assert.assertTrue(_timeout.schedule(10000,TimeUnit.MILLISECONDS));
Thread.sleep(1500);
Assert.assertFalse(_expired);
}

@Test
public void testMultiple() throws Exception
{
Thread.sleep(1500);
Assert.assertTrue(_expired);
_expired=false;
Assert.assertFalse(_timeout.schedule(500,TimeUnit.MILLISECONDS));
Thread.sleep(1000);
Assert.assertTrue(_expired);
_expired=false;
_timeout.schedule(500,TimeUnit.MILLISECONDS);
Thread.sleep(1000);
Assert.assertTrue(_expired);
}

@Test
@Ignore
public void testBusy() throws Exception
{
QueuedThreadPool pool = new QueuedThreadPool(200);
pool.start();

long test_until = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(1500);

Assert.assertTrue(_timeout.schedule(100,TimeUnit.MILLISECONDS));
while(System.nanoTime()<test_until)
{
CountDownLatch latch = new CountDownLatch(1);
pool.execute(()->
{
_timeout.schedule(100,TimeUnit.MILLISECONDS);
latch.countDown();
});
latch.await();
}

Assert.assertFalse(_expired);
pool.stop();
}
}

@ -29,6 +29,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.eclipse.jetty.io.ManagedSelector.SelectorUpdate;
import org.eclipse.jetty.io.ManagedSelector.Connect;
import org.eclipse.jetty.toolchain.test.annotation.Slow;
import org.eclipse.jetty.util.Callback;
import org.eclipse.jetty.util.thread.QueuedThreadPool;

@ -29,7 +29,7 @@
<Item>
<New class="org.eclipse.jetty.server.HttpConnectionFactory">
<Arg name="config"><Ref refid="httpConfig" /></Arg>
<Arg name="compliance"><Call class="org.eclipse.jetty.http.HttpCompliance" name="valueOf"><Arg><Property name="jetty.http.compliance" default="RFC7230"/></Arg></Call></Arg>
<Arg name="compliance"><Call class="org.eclipse.jetty.http.HttpCompliance" name="valueOf"><Arg><Property name="jetty.http.compliance" default="RFC7230_LEGACY"/></Arg></Call></Arg>
</New>
</Item>
</Array>

@ -46,5 +46,5 @@ etc/jetty-http.xml
## Connect Timeout in milliseconds
# jetty.http.connectTimeout=15000

## HTTP Compliance: RFC7230, RFC2616, LEGACY
# jetty.http.compliance=RFC7230
## HTTP Compliance: RFC7230, RFC7230_LEGACY, RFC2616, RFC2616_LEGACY, LEGACY or CUSTOMn
# jetty.http.compliance=RFC7230_LEGACY
@@ -27,6 +27,7 @@ import java.util.List;
import org.eclipse.jetty.http.BadMessageException;
import org.eclipse.jetty.http.HostPortHttpField;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.http.HttpComplianceSection;
import org.eclipse.jetty.http.HttpField;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpGenerator;
@@ -513,7 +514,7 @@ public class HttpChannelOverHttp extends HttpChannel implements HttpParser.Reque
}

@Override
public void onComplianceViolation(HttpCompliance compliance, HttpCompliance required, String reason)
public void onComplianceViolation(HttpCompliance compliance, HttpComplianceSection violation, String reason)
{
if (_httpConnection.isRecordHttpComplianceViolations())
{
@@ -521,10 +522,11 @@
{
_complianceViolations = new ArrayList<>();
}
String violation = String.format("%s<%s: %s for %s", compliance, required, reason, getHttpTransport());
_complianceViolations.add(violation);
String record = String.format("%s (see %s) in mode %s for %s in %s",
violation.getDescription(), violation.getURL(), compliance, reason, getHttpTransport());
_complianceViolations.add(record);
if (LOG.isDebugEnabled())
LOG.debug(violation);
LOG.debug(record);
}
}
}

@@ -103,7 +103,7 @@ public class HttpConnection extends AbstractConnection implements Runnable, Http
_channel = newHttpChannel();
_input = _channel.getRequest().getHttpInput();
_parser = newHttpParser(compliance);
_recordHttpComplianceViolations=recordComplianceViolations;
_recordHttpComplianceViolations = recordComplianceViolations;
if (LOG.isDebugEnabled())
LOG.debug("New HTTP Connection {}", this);
}

@@ -342,13 +342,6 @@ public class ServerConnector extends AbstractNetworkConnector

return serverChannel;
}

@Override
public Future<Void> shutdown()
{
// shutdown all the connections
return super.shutdown();
}

@Override
public void close()

@@ -24,22 +24,27 @@ import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.nio.file.FileVisitOption;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import org.eclipse.jetty.util.ClassLoadingObjectInputStream;
import org.eclipse.jetty.util.MultiException;
import org.eclipse.jetty.util.StringUtil;
import org.eclipse.jetty.util.annotation.ManagedAttribute;
import org.eclipse.jetty.util.annotation.ManagedObject;
@@ -54,11 +59,21 @@ import org.eclipse.jetty.util.log.Logger;
@ManagedObject
public class FileSessionDataStore extends AbstractSessionDataStore
{

private final static Logger LOG = Log.getLogger("org.eclipse.jetty.server.session");
private File _storeDir;
private boolean _deleteUnrestorableFiles = false;

private Map<String,String> _sessionFileMap = new ConcurrentHashMap<>();
private String _contextString;
protected long _lastSweepTime = 0L;

@Override
public void initialize(SessionContext context) throws Exception
{
super.initialize(context);
_contextString = _context.getCanonicalContextPath()+"_"+_context.getVhost();
}

@Override
protected void doStart() throws Exception
@@ -70,6 +85,8 @@ public class FileSessionDataStore extends AbstractSessionDataStore
@Override
protected void doStop() throws Exception
{
_sessionFileMap.clear();
_lastSweepTime = 0;
super.doStop();
}

@ -99,117 +116,162 @@ public class FileSessionDataStore extends AbstractSessionDataStore
|
|||
|
||||
|
||||
/**
|
||||
* @see org.eclipse.jetty.server.session.SessionDataStore#delete(java.lang.String)
|
||||
* Delete a session
|
||||
*
|
||||
* @param id session id
|
||||
*/
|
||||
@Override
|
||||
public boolean delete(String id) throws Exception
|
||||
{
|
||||
File file = null;
|
||||
if (_storeDir != null)
|
||||
{
|
||||
file = getFile(_storeDir, id);
|
||||
if (file != null && file.exists() && file.getParentFile().equals(_storeDir))
|
||||
{
|
||||
return file.delete();
|
||||
}
|
||||
//remove from our map
|
||||
String filename = _sessionFileMap.remove(getIdWithContext(id));
|
||||
if (filename == null)
|
||||
return false;
|
||||
|
||||
//remove the file
|
||||
return deleteFile(filename);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Delete the file associated with a session
|
||||
*
|
||||
* @param filename name of the file containing the session's information
|
||||
*
|
||||
* @return true if file was deleted, false otherwise
|
||||
* @throws Exception
|
||||
*/
|
||||
public boolean deleteFile (String filename) throws Exception
|
||||
{
|
||||
if (filename == null)
|
||||
return false;
|
||||
File file = new File(_storeDir, filename);
|
||||
return Files.deleteIfExists(file.toPath());
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @see org.eclipse.jetty.server.session.SessionDataStore#getExpired(Set)
|
||||
* Check to see which sessions have expired.
|
||||
*
|
||||
* @param candidates the set of session ids that the SessionCache believes
|
||||
* have expired
|
||||
* @return the complete set of sessions that have expired, including those
|
||||
* that are not currently loaded into the SessionCache
|
||||
*/
|
||||
@Override
|
||||
public Set<String> doGetExpired(final Set<String> candidates)
|
||||
{
|
||||
final long now = System.currentTimeMillis();
|
||||
HashSet<String> expired = new HashSet<String>();
|
||||
|
||||
HashSet<String> idsWithContext = new HashSet<>();
|
||||
|
||||
|
||||
//one pass to get all idWithContext
|
||||
File [] files = _storeDir.listFiles(new FilenameFilter()
|
||||
{
|
||||
@Override
|
||||
public boolean accept(File dir, String name)
|
||||
{
|
||||
if (dir != _storeDir)
|
||||
return false;
|
||||
|
||||
//dir may contain files that don't match our naming pattern
|
||||
if (!match(name))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
String idWithContext = getIdWithContextFromString(name);
|
||||
if (!StringUtil.isBlank(idWithContext))
|
||||
idsWithContext.add(idWithContext);
|
||||
return true;
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
//got the list of all sessionids with their contexts, remove all old files for each one
|
||||
for (String idWithContext:idsWithContext)
|
||||
//iterate over the files and work out which have expired
|
||||
for (String filename:_sessionFileMap.values())
|
||||
{
|
||||
deleteOldFiles(_storeDir, idWithContext);
|
||||
}
|
||||
|
||||
|
||||
//now find sessions that have expired in any context
|
||||
files = _storeDir.listFiles(new FilenameFilter()
|
||||
{
|
||||
@Override
|
||||
public boolean accept(File dir, String name)
|
||||
try
|
||||
{
|
||||
if (dir != _storeDir)
|
||||
return false;
|
||||
|
||||
//dir may contain files that don't match our naming pattern
|
||||
if (!match(name))
|
||||
return false;
|
||||
|
||||
try
|
||||
{
|
||||
long expiry = getExpiryFromString(name);
|
||||
return expiry > 0 && expiry < now;
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
long expiry = getExpiryFromFilename(filename);
|
||||
if (expiry > 0 && expiry < now)
|
||||
expired.add(getIdFromFilename(filename));
|
||||
}
|
||||
});
|
||||
|
||||
if (files != null)
|
||||
{
|
||||
for (File f:files)
|
||||
catch (Exception e)
|
||||
{
|
||||
expired.add(getIdFromFile(f));
|
||||
LOG.warn(e);
|
||||
}
|
||||
}
|
||||
|
||||
//check candidates that were not found to be expired, perhaps they no
|
||||
//longer exist and they should be expired
|
||||
//check candidates that were not found to be expired, perhaps
|
||||
//because they no longer exist and they should be expired
|
||||
for (String c:candidates)
|
||||
{
|
||||
if (!expired.contains(c))
|
||||
{
|
||||
//check if the file exists
|
||||
File f = getFile(_storeDir, c);
|
||||
if (f == null || !f.exists())
|
||||
//if it doesn't have a file then the session doesn't exist
|
||||
String filename = _sessionFileMap.get(getIdWithContext(c));
|
||||
if (filename == null)
|
||||
expired.add(c);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//Infrequently iterate over all files in the store, and delete those
|
||||
//that expired a long time ago, even if they belong to
|
||||
//another context. This ensures that files that
|
||||
//belong to defunct contexts are cleaned up.
|
||||
//If the graceperiod is disabled, don't do the sweep!
|
||||
if ((_gracePeriodSec > 0) && ((_lastSweepTime == 0) || ((now - _lastSweepTime) >= (5*TimeUnit.SECONDS.toMillis(_gracePeriodSec)))))
|
||||
{
|
||||
_lastSweepTime = now;
|
||||
sweepDisk();
|
||||
}
|
||||
return expired;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Check all session files that do not belong to this context and
|
||||
* remove any that expired long ago (ie at least 5 gracePeriods ago).
|
||||
*/
|
||||
public void sweepDisk()
|
||||
{
|
||||
//iterate over the files in the store dir and check expiry times
|
||||
long now = System.currentTimeMillis();
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Sweeping {} for old session files", _storeDir);
|
||||
try
|
||||
{
|
||||
Files.walk(_storeDir.toPath(), 1, FileVisitOption.FOLLOW_LINKS)
|
||||
.filter(p->!Files.isDirectory(p)).filter(p->!isOurContextSessionFilename(p.getFileName().toString()))
|
||||
.filter(p->isSessionFilename(p.getFileName().toString()))
|
||||
.forEach(p->{
|
||||
|
||||
try
|
||||
{
|
||||
sweepFile(now, p);
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
LOG.warn(e);
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
LOG.warn(e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Check to see if the expiry on the file is very old, and
|
||||
* delete the file if so. "Old" means that it expired at least
|
||||
* 5 gracePeriods ago. The session can belong to any context.
|
||||
*
|
||||
* @param now the time now in msec
|
||||
* @param p the file to check
|
||||
*
|
||||
* @throws Exception
|
||||
*/
|
||||
public void sweepFile (long now, Path p)
|
||||
throws Exception
|
||||
{
|
||||
if (p == null)
|
||||
return;
|
||||
|
||||
long expiry = getExpiryFromFilename(p.getFileName().toString());
|
||||
//files with 0 expiry never expire
|
||||
if (expiry >0 && ((now - expiry) >= (5*TimeUnit.SECONDS.toMillis(_gracePeriodSec))))
|
||||
{
|
||||
Files.deleteIfExists(p);
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Sweep deleted {}", p.getFileName());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* @see org.eclipse.jetty.server.session.SessionDataStore#load(java.lang.String)
|
||||
|
@@ -223,15 +285,22 @@ public class FileSessionDataStore extends AbstractSessionDataStore
{
public void run ()
{
//get rid of all but the newest file for a session
File file = deleteOldFiles(_storeDir, getIdWithContext(id));

if (file == null || !file.exists())
//load session info from its file
String idWithContext = getIdWithContext(id);
String filename = _sessionFileMap.get(idWithContext);
if (filename == null)
{
if (LOG.isDebugEnabled())
LOG.debug("No file: {}",file);
LOG.debug("Unknown file {}",filename);
return;
}
File file = new File (_storeDir, filename);
if (!file.exists())
{
if (LOG.isDebugEnabled())
LOG.debug("No such file {}",filename);
return;
}

try (FileInputStream in = new FileInputStream(file))
{
@@ -243,8 +312,15 @@ public class FileSessionDataStore extends AbstractSessionDataStore
{
if (isDeleteUnrestorableFiles() && file.exists() && file.getParentFile().equals(_storeDir))
{
file.delete();
LOG.warn("Deleted unrestorable file for session {}", id);
try
{
delete(id);
LOG.warn("Deleted unrestorable file for session {}", id);
}
catch (Exception x)
{
LOG.warn("Unable to delete unrestorable file {} for session {}", filename, id, x);
}
}

exception.set(e);
@@ -255,6 +331,7 @@ public class FileSessionDataStore extends AbstractSessionDataStore
}
}
};

//ensure this runs with the context classloader set
_context.run(r);

@@ -275,19 +352,20 @@ public class FileSessionDataStore extends AbstractSessionDataStore
File file = null;
if (_storeDir != null)
{
//remove any existing files for the session
deleteAllFiles(_storeDir, getIdWithContext(id));

delete(id);

//make a fresh file using the latest session expiry
file = new File(_storeDir, getIdWithContextAndExpiry(data));
String filename = getIdWithContextAndExpiry(data);
String idWithContext = getIdWithContext(id);
file = new File(_storeDir, filename);

try(FileOutputStream fos = new FileOutputStream(file,false))
{
save(fos, id, data);
_sessionFileMap.put(idWithContext, filename);
}
catch (Exception e)
{
e.printStackTrace();
if (file != null)
file.delete(); // No point keeping the file if we didn't save the whole session
throw new UnwriteableSessionDataException(id, _context,e);
@ -296,16 +374,100 @@ public class FileSessionDataStore extends AbstractSessionDataStore
|
|||
}
|
||||
|
||||
/**
|
||||
* Read the names of the existing session files and build a map of
|
||||
* fully qualified session ids (ie with context) to filename. If there
|
||||
* is more than one file for the same session, only the most recently modified will
|
||||
* be kept and the rest deleted. At the same time, any files - for any context -
|
||||
* that expired a long time ago will be cleaned up.
|
||||
*
|
||||
* @throws IllegalStateException if storeDir doesn't exist, isn't readable/writeable
|
||||
* or contains 2 files with the same lastmodify time for the same session. Throws IOException
|
||||
* if the lastmodifytimes can't be read.
|
||||
*/
|
||||
public void initializeStore ()
|
||||
throws Exception
|
||||
{
|
||||
if (_storeDir == null)
|
||||
throw new IllegalStateException("No file store specified");
|
||||
|
||||
if (!_storeDir.exists())
|
||||
_storeDir.mkdirs();
|
||||
else
|
||||
{
|
||||
if (!(_storeDir.isDirectory() &&_storeDir.canWrite() && _storeDir.canRead()))
|
||||
throw new IllegalStateException(_storeDir.getAbsolutePath()+" must be readable/writeable dir");
|
||||
|
||||
//iterate over files in _storeDir and build map of session id to filename.
|
||||
//if we come across files for sessions in other contexts, check if they're
|
||||
//ancient and remove if necessary.
|
||||
MultiException me = new MultiException();
|
||||
long now = System.currentTimeMillis();
|
||||
|
||||
Files.walk(_storeDir.toPath(), 1, FileVisitOption.FOLLOW_LINKS)
|
||||
.filter(p->!Files.isDirectory(p)).filter(p->isSessionFilename(p.getFileName().toString()))
|
||||
.forEach(p->{
|
||||
//first get rid of all ancient files, regardless of which
|
||||
//context they are for
|
||||
try
|
||||
{
|
||||
sweepFile(now, p);
|
||||
}
|
||||
catch (Exception x)
|
||||
{
|
||||
me.add(x);
|
||||
}
|
||||
|
||||
String filename = p.getFileName().toString();
|
||||
String context = getContextFromFilename(filename);
|
||||
//now process it if it wasn't deleted, and it is for our context
|
||||
if (Files.exists(p) && _contextString.equals(context))
|
||||
{
|
||||
//the session is for our context, populate the map with it
|
||||
String sessionIdWithContext = getIdWithContextFromFilename(filename);
|
||||
if (sessionIdWithContext != null)
|
||||
{
|
||||
//handle multiple session files existing for the same session: remove all
|
||||
//but the file with the most recent expiry time
|
||||
String existing = _sessionFileMap.putIfAbsent(sessionIdWithContext, filename);
|
||||
if (existing != null)
|
||||
{
|
||||
//if there was a prior filename, work out which has the most
|
||||
//recent modify time
|
||||
try
|
||||
{
|
||||
long existingExpiry = getExpiryFromFilename(existing);
|
||||
long thisExpiry = getExpiryFromFilename(filename);
|
||||
|
||||
if (thisExpiry > existingExpiry)
|
||||
{
|
||||
//replace with more recent file
|
||||
Path existingPath = _storeDir.toPath().resolve(existing);
|
||||
//update the file we're keeping
|
||||
_sessionFileMap.put(sessionIdWithContext, filename);
|
||||
//delete the old file
|
||||
Files.delete(existingPath);
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Replaced {} with {}", existing, filename);
|
||||
}
|
||||
else
|
||||
{
|
||||
//we found an older file, delete it
|
||||
Files.delete(p);
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Deleted expired session file {}", filename);
|
||||
}
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
me.add(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
me.ifExceptionThrow();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @see org.eclipse.jetty.server.session.SessionDataStore#isPassivating()
|
||||
|
@@ -326,12 +488,14 @@ public class FileSessionDataStore extends AbstractSessionDataStore
@Override
public boolean exists(String id) throws Exception
{
File sessionFile = deleteOldFiles(_storeDir, getIdWithContext(id));
if (sessionFile == null || !sessionFile.exists())
String idWithContext = getIdWithContext(id);
String filename = _sessionFileMap.get(idWithContext);

if (filename == null)
return false;

//check the expiry
long expiry = getExpiryFromFile(sessionFile);
long expiry = getExpiryFromFilename(filename);
if (expiry <= 0)
return true; //never expires
else
@@ -368,7 +532,8 @@ public class FileSessionDataStore extends AbstractSessionDataStore
oos.writeObject(data.getAttribute(name));
}
}

/**
* Get the session id with its context.
*
@@ -377,7 +542,7 @@ public class FileSessionDataStore extends AbstractSessionDataStore
*/
private String getIdWithContext (String id)
{
return _context.getCanonicalContextPath()+"_"+_context.getVhost()+"_"+id;
return _contextString+"_"+id;
}

/**
@ -391,35 +556,16 @@ public class FileSessionDataStore extends AbstractSessionDataStore
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* Work out which session id the file relates to.
|
||||
* @param file the file to check
|
||||
* @return the session id the file relates to.
|
||||
*/
|
||||
private String getIdFromFile (File file)
|
||||
private String getIdFromFilename (String filename)
|
||||
{
|
||||
if (file == null)
|
||||
if (filename == null)
|
||||
return null;
|
||||
String name = file.getName();
|
||||
|
||||
return name.substring(name.lastIndexOf('_')+1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the expiry time of the session stored in the file.
|
||||
* @param file the file from which to extract the expiry time
|
||||
* @return the expiry time
|
||||
*/
|
||||
private long getExpiryFromFile (File file)
|
||||
{
|
||||
if (file == null)
|
||||
return 0;
|
||||
|
||||
return getExpiryFromString(file.getName());
|
||||
return filename.substring(filename.lastIndexOf('_')+1);
|
||||
}
|
||||
|
||||
|
||||
private long getExpiryFromString (String filename)
|
||||
|
||||
private long getExpiryFromFilename (String filename)
|
||||
{
|
||||
if (StringUtil.isBlank(filename) || filename.indexOf("_") < 0)
|
||||
throw new IllegalStateException ("Invalid or missing filename");
|
||||
|
@ -428,26 +574,24 @@ public class FileSessionDataStore extends AbstractSessionDataStore
|
|||
return (s==null?0:Long.parseLong(s));
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the session id and context from the filename.
|
||||
* @param file the file whose name to use
|
||||
* @return the session id plus context
|
||||
*/
|
||||
private String getIdWithContextFromFile (File file)
|
||||
|
||||
private String getContextFromFilename (String filename)
|
||||
{
|
||||
if (file == null)
|
||||
if (StringUtil.isBlank(filename))
|
||||
return null;
|
||||
|
||||
String s = getIdWithContextFromString(file.getName());
|
||||
return s;
|
||||
int start = filename.indexOf('_');
|
||||
int end = filename.lastIndexOf('_');
|
||||
return filename.substring(start+1, end);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Extract the session id and context from the filename
|
||||
* @param filename the name of the file to use
|
||||
* @return the session id plus context
|
||||
*/
|
||||
private String getIdWithContextFromString (String filename)
|
||||
private String getIdWithContextFromFilename (String filename)
|
||||
{
|
||||
if (StringUtil.isBlank(filename) || filename.indexOf('_') < 0)
|
||||
return null;
|
||||
|
@ -456,11 +600,12 @@ public class FileSessionDataStore extends AbstractSessionDataStore
|
|||
}
|
||||
|
||||
/**
|
||||
* Check if the filename matches our session pattern
|
||||
* @param filename
|
||||
* @return
|
||||
* Check if the filename is a session filename.
|
||||
*
|
||||
* @param filename the filename to check
|
||||
* @return true if the filename has the correct filename format
|
||||
*/
|
||||
private boolean match (String filename)
|
||||
private boolean isSessionFilename (String filename)
|
||||
{
|
||||
if (StringUtil.isBlank(filename))
|
||||
return false;
|
||||
|
@ -469,169 +614,33 @@ public class FileSessionDataStore extends AbstractSessionDataStore
|
|||
//Need at least 4 parts for a valid filename
|
||||
if (parts.length < 4)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Find a File for the session id for the current context.
|
||||
* Check if the filename matches our session pattern
|
||||
* and is a session for our context.
|
||||
*
|
||||
* @param storeDir the session storage directory
|
||||
* @param id the session id
|
||||
* @return the file
|
||||
*/
|
||||
private File getFile (final File storeDir, final String id)
|
||||
* @param filename the filename to check
|
||||
* @return true if the filename has the correct filename format and is for this context
|
||||
*/
|
||||
private boolean isOurContextSessionFilename (String filename)
|
||||
{
|
||||
File[] files = storeDir.listFiles (new FilenameFilter() {
|
||||
|
||||
/**
|
||||
* @see java.io.FilenameFilter#accept(java.io.File, java.lang.String)
|
||||
*/
|
||||
@Override
|
||||
public boolean accept(File dir, String name)
|
||||
{
|
||||
if (dir != storeDir)
|
||||
return false;
|
||||
return (name.contains(getIdWithContext(id)));
|
||||
}
|
||||
|
||||
});
|
||||
if (StringUtil.isBlank(filename))
|
||||
return false;
|
||||
String[] parts = filename.split("_");
|
||||
|
||||
if (files == null || files.length < 1)
|
||||
return null;
|
||||
return files[0];
|
||||
//Need at least 4 parts for a valid filename
|
||||
if (parts.length < 4)
|
||||
return false;
|
||||
|
||||
//Also needs to be for our context
|
||||
String context = getContextFromFilename(filename);
|
||||
if (context == null)
|
||||
return false;
|
||||
return (_contextString.equals(context));
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Remove all existing session files for the session in the context
|
||||
* @param storeDir where the session files are stored
|
||||
* @param idInContext the session id within a particular context
|
||||
*/
|
||||
private void deleteAllFiles(final File storeDir, final String idInContext)
|
||||
{
|
||||
File[] files = storeDir.listFiles (new FilenameFilter() {
|
||||
|
||||
/**
|
||||
* @see java.io.FilenameFilter#accept(java.io.File, java.lang.String)
|
||||
*/
|
||||
@Override
|
||||
public boolean accept(File dir, String name)
|
||||
{
|
||||
if (dir != storeDir)
|
||||
return false;
|
||||
return (name.contains(idInContext));
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
//no files for that id
|
||||
if (files == null || files.length < 1)
|
||||
return;
|
||||
|
||||
//delete all files
|
||||
for (File f:files)
|
||||
{
|
||||
try
|
||||
{
|
||||
Files.deleteIfExists(f.toPath());
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
LOG.warn("Unable to delete session file", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Delete all but the most recent file for a given session id in a context.
|
||||
*
|
||||
* @param storeDir the directory in which sessions are stored
|
||||
* @param idWithContext the id of the session
|
||||
* @return the most recent remaining file for the session, can be null
|
||||
*/
|
||||
private File deleteOldFiles (final File storeDir, final String idWithContext)
|
||||
{
|
||||
File[] files = storeDir.listFiles (new FilenameFilter() {
|
||||
|
||||
/**
|
||||
* @see java.io.FilenameFilter#accept(java.io.File, java.lang.String)
|
||||
*/
|
||||
@Override
|
||||
public boolean accept(File dir, String name)
|
||||
{
|
||||
if (dir != storeDir)
|
||||
return false;
|
||||
|
||||
if (!match(name))
|
||||
return false;
|
||||
|
||||
return (name.contains(idWithContext));
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
//no file for that session
|
||||
if (files == null || files.length == 0)
|
||||
return null;
|
||||
|
||||
|
||||
//delete all but the most recent file
|
||||
File newest = null;
|
||||
|
||||
for (File f:files)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (newest == null)
|
||||
{
|
||||
//haven't looked at any files yet
|
||||
newest = f;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (f.lastModified() > newest.lastModified())
|
||||
{
|
||||
//this file is more recent
|
||||
Files.deleteIfExists(newest.toPath());
|
||||
newest = f;
|
||||
}
|
||||
else if (f.lastModified() < newest.lastModified())
|
||||
{
|
||||
//this file is older
|
||||
Files.deleteIfExists(f.toPath());
|
||||
}
|
||||
else
|
||||
{
|
||||
//files have same last modified times, decide based on latest expiry time
|
||||
long exp1 = getExpiryFromFile(newest);
|
||||
long exp2 = getExpiryFromFile(f);
|
||||
if (exp2 >= exp1)
|
||||
{
|
||||
//this file has a later expiry date
|
||||
Files.deleteIfExists(newest.toPath());
|
||||
newest = f;
|
||||
}
|
||||
else
|
||||
{
|
||||
//this file has an earlier expiry date
|
||||
Files.deleteIfExists(f.toPath());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
LOG.warn("Unable to delete old session file", e);
|
||||
}
|
||||
}
|
||||
|
||||
return newest;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
@@ -104,7 +104,7 @@ public class ExtendedServerTest extends HttpServerTestBase
{
public ExtendedHttpConnection(HttpConfiguration config, Connector connector, EndPoint endPoint)
{
super(config,connector,endPoint,HttpCompliance.RFC7230,false);
super(config,connector,endPoint,HttpCompliance.RFC7230_LEGACY,false);
}

@Override

@@ -25,12 +25,15 @@ import static org.hamcrest.Matchers.lessThan;
import static org.junit.Assert.assertThat;
import static org.junit.Assume.assumeTrue;

import java.io.BufferedReader;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.ConnectException;
import java.net.Socket;
import java.nio.channels.ClosedChannelException;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -41,14 +44,21 @@ import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.eclipse.jetty.io.Connection;
import org.eclipse.jetty.io.EndPoint;
import org.eclipse.jetty.io.EofException;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.eclipse.jetty.server.handler.StatisticsHandler;
import org.eclipse.jetty.toolchain.test.AdvancedRunner;
import org.eclipse.jetty.toolchain.test.OS;
import org.eclipse.jetty.util.IO;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.log.Logger;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;

@ -265,6 +275,169 @@ public class GracefulStopTest
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
public void testSlowClose(long stopTimeout, long closeWait, Matcher<Long> stopTimeMatcher) throws Exception
|
||||
{
|
||||
Server server= new Server();
|
||||
server.setStopTimeout(stopTimeout);
|
||||
|
||||
CountDownLatch closed = new CountDownLatch(1);
|
||||
ServerConnector connector = new ServerConnector(server, 2, 2, new HttpConnectionFactory()
|
||||
{
|
||||
|
||||
@Override
|
||||
public Connection newConnection(Connector connector, EndPoint endPoint)
|
||||
{
|
||||
// Slow closing connection
|
||||
HttpConnection conn = new HttpConnection(getHttpConfiguration(), connector, endPoint, getHttpCompliance(), isRecordHttpComplianceViolations())
|
||||
{
|
||||
@Override
|
||||
public void close()
|
||||
{
|
||||
try
|
||||
{
|
||||
new Thread(()->
|
||||
{
|
||||
try
|
||||
{
|
||||
Thread.sleep(closeWait);
|
||||
}
|
||||
catch (InterruptedException e)
|
||||
{
|
||||
}
|
||||
finally
|
||||
{
|
||||
super.close();
|
||||
}
|
||||
|
||||
}).start();
|
||||
}
|
||||
catch(Exception e)
|
||||
{
|
||||
// e.printStackTrace();
|
||||
}
|
||||
finally
|
||||
{
|
||||
closed.countDown();
|
||||
}
|
||||
}
|
||||
};
|
||||
return configure(conn, connector, endPoint);
|
||||
}
|
||||
|
||||
});
|
||||
connector.setPort(0);
|
||||
server.addConnector(connector);
|
||||
|
||||
NoopHandler handler = new NoopHandler();
|
||||
server.setHandler(handler);
|
||||
|
||||
server.start();
|
||||
final int port=connector.getLocalPort();
|
||||
Socket client = new Socket("127.0.0.1", port);
|
||||
client.setSoTimeout(10000);
|
||||
client.getOutputStream().write((
|
||||
"GET / HTTP/1.1\r\n"+
|
||||
"Host: localhost:"+port+"\r\n" +
|
||||
"Content-Type: plain/text\r\n" +
|
||||
"\r\n"
|
||||
).getBytes());
|
||||
client.getOutputStream().flush();
|
||||
handler.latch.await();
|
||||
|
||||
// look for a response
|
||||
BufferedReader in = new BufferedReader(new InputStreamReader(client.getInputStream() ,StandardCharsets.ISO_8859_1));
|
||||
while(true)
|
||||
{
|
||||
String line = in.readLine();
|
||||
if (line==null)
|
||||
Assert.fail();
|
||||
if (line.length()==0)
|
||||
break;
|
||||
}
|
||||
|
||||
long start = System.nanoTime();
|
||||
try
|
||||
{
|
||||
server.stop();
|
||||
Assert.assertTrue(stopTimeout==0 || stopTimeout>closeWait);
|
||||
}
|
||||
catch(Exception e)
|
||||
{
|
||||
Assert.assertTrue(stopTimeout>0 && stopTimeout<closeWait);
|
||||
}
|
||||
long stop = System.nanoTime();
|
||||
|
||||
// Check stop time was correct
|
||||
assertThat(TimeUnit.NANOSECONDS.toMillis(stop-start),stopTimeMatcher);
|
||||
|
||||
// Connection closed
|
||||
while(true)
|
||||
{
|
||||
int r = client.getInputStream().read();
|
||||
if (r==-1)
|
||||
break;
|
||||
}
|
||||
|
||||
// onClose Thread interrupted or completed
|
||||
if (stopTimeout>0)
|
||||
Assert.assertTrue(closed.await(1000,TimeUnit.MILLISECONDS));
|
||||
|
||||
if (!client.isClosed())
|
||||
client.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test of non graceful stop when a connection close is slow
|
||||
* @throws Exception on test failure
|
||||
*/
|
||||
@Test
|
||||
public void testSlowCloseNotGraceful() throws Exception
|
||||
{
|
||||
Log.getLogger(QueuedThreadPool.class).info("Expect some threads can't be stopped");
|
||||
testSlowClose(0,5000,lessThan(750L));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test of graceful stop when close is slower than timeout
|
||||
* @throws Exception on test failure
|
||||
*/
|
||||
@Test
|
||||
@Ignore // TODO disable while #2046 is fixed
|
||||
public void testSlowCloseTinyGraceful() throws Exception
|
||||
{
|
||||
Log.getLogger(QueuedThreadPool.class).info("Expect some threads can't be stopped");
|
||||
testSlowClose(1,5000,lessThan(1500L));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test of graceful stop when close is faster than timeout;
|
||||
* @throws Exception on test failure
|
||||
*/
|
||||
@Test
|
||||
@Ignore // TODO disable while #2046 is fixed
|
||||
public void testSlowCloseGraceful() throws Exception
|
||||
{
|
||||
testSlowClose(5000,1000,Matchers.allOf(greaterThan(750L),lessThan(4999L)));
|
||||
}
|
||||
|
||||
|
||||
static class NoopHandler extends AbstractHandler
|
||||
{
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
|
||||
NoopHandler()
|
||||
{
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response)
|
||||
throws IOException, ServletException
|
||||
{
|
||||
baseRequest.setHandled(true);
|
||||
latch.countDown();
|
||||
}
|
||||
}
|
||||
|
||||
static class TestHandler extends AbstractHandler
|
||||
{
|
||||
|
|
|
@@ -245,7 +245,7 @@ public class HttpConnectionTest
@Test
public void test_0_9() throws Exception
{
connector.getConnectionFactory(HttpConnectionFactory.class).setHttpCompliance(HttpCompliance.RFC2616);
connector.getConnectionFactory(HttpConnectionFactory.class).setHttpCompliance(HttpCompliance.RFC2616_LEGACY);
LocalEndPoint endp = connector.executeRequest("GET /R1\n");
endp.waitUntilClosed();
String response=BufferUtil.toString(endp.takeOutput());

@@ -18,6 +18,9 @@

package org.eclipse.jetty.server.session;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.FilenameFilter;
import java.util.Collections;
@@ -37,6 +40,7 @@ import org.junit.Test;

public class FileSessionManagerTest
{
public static final long ONE_DAY = (1000L*60L*60L*24L);
private static StdErrLog _log;
private static boolean _stacks;

@@ -110,6 +114,12 @@ public class FileSessionManagerTest

/**
* When starting the filestore, check that really old expired
* files are deleted irrespective of context session belongs to.
*
* @throws Exception
*/
@Test
public void testDeleteOfOlderFiles() throws Exception
{
@ -129,40 +139,137 @@ public class FileSessionManagerTest
|
|||
File testDir = MavenTestingUtils.getTargetTestingDir("hashes");
|
||||
FS.ensureEmpty(testDir);
|
||||
ds.setStoreDir(testDir);
|
||||
handler.setSessionIdManager(idmgr);
|
||||
handler.start();
|
||||
|
||||
//create a bunch of older files for same session abc
|
||||
|
||||
//create a really old file for session abc
|
||||
String name1 = "100__0.0.0.0_abc";
|
||||
File f1 = new File(testDir, name1);
|
||||
if (f1.exists())
|
||||
Assert.assertTrue(f1.delete());
|
||||
f1.createNewFile();
|
||||
|
||||
//create another really old file for session abc
|
||||
Thread.sleep(1100);
|
||||
|
||||
String name2 = "101__0.0.0.0_abc";
|
||||
File f2 = new File(testDir, name2);
|
||||
if (f2.exists())
|
||||
Assert.assertTrue(f2.delete());
|
||||
f2.createNewFile();
|
||||
|
||||
|
||||
//make one file for session abc that should not have expired
|
||||
Thread.sleep(1100);
|
||||
|
||||
String name3 = "102__0.0.0.0_abc";
|
||||
long exp = System.currentTimeMillis() + ONE_DAY;
|
||||
String name3 = Long.toString(exp)+"__0.0.0.0_abc";
|
||||
File f3 = new File(testDir, name3);
|
||||
if (f3.exists())
|
||||
Assert.assertTrue(f3.delete());
|
||||
f3.createNewFile();
|
||||
|
||||
//make a file that is for a different context
|
||||
//that expired a long time ago - should be
|
||||
//removed by sweep on startup
|
||||
Thread.sleep(1100);
|
||||
String name4 = "1099_foo_0.0.0.0_abc";
|
||||
File f4 = new File(testDir, name4);
|
||||
if (f4.exists())
|
||||
Assert.assertTrue(f4.delete());
|
||||
f4.createNewFile();
|
||||
|
||||
//make a file that is for a different context
|
||||
//that should not have expired - ensure it is
|
||||
//not removed
|
||||
exp = System.currentTimeMillis() + ONE_DAY;
|
||||
String name5 = Long.toString(exp)+"_foo_0.0.0.0_abcdefg";
|
||||
File f5 = new File(testDir, name5);
|
||||
if (f5.exists())
|
||||
Assert.assertTrue(f5.delete());
|
||||
f5.createNewFile();
|
||||
|
||||
//make a file that is for a different context
|
||||
//that expired, but only recently - it should
|
||||
//not be removed by the startup process
|
||||
exp = System.currentTimeMillis() - 1000L;
|
||||
String name6 = Long.toString(exp)+"_foo_0.0.0.0_abcdefg";
|
||||
File f6 = new File(testDir, name5);
|
||||
if (f6.exists())
|
||||
Assert.assertTrue(f6.delete());
|
||||
f6.createNewFile();
|
||||
|
||||
handler.setSessionIdManager(idmgr);
|
||||
handler.start();
|
||||
|
||||
Thread.sleep(1100);
|
||||
|
||||
|
||||
Session session = handler.getSession("abc");
|
||||
Assert.assertTrue(!f1.exists());
|
||||
Assert.assertTrue(!f2.exists());
|
||||
Assert.assertTrue(f3.exists());
|
||||
Assert.assertTrue(!f4.exists());
|
||||
Assert.assertTrue(f5.exists());
|
||||
Assert.assertTrue(f6.exists());
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Tests that only the most recent file will be
|
||||
* loaded into the cache, even if it is already
|
||||
* expired. Other recently expired files for
|
||||
* same session should be deleted.
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test
|
||||
public void testLoadOnlyMostRecent() throws Exception
|
||||
{
|
||||
Server server = new Server();
|
||||
SessionHandler handler = new SessionHandler();
|
||||
handler.setServer(server);
|
||||
final DefaultSessionIdManager idmgr = new DefaultSessionIdManager(server);
|
||||
idmgr.setServer(server);
|
||||
server.setSessionIdManager(idmgr);
|
||||
|
||||
FileSessionDataStore ds = new FileSessionDataStore();
|
||||
ds.setGracePeriodSec(100); //set graceperiod to 100sec to control what we consider as very old
|
||||
ds.setDeleteUnrestorableFiles(false); //turn off deletion of unreadable session files
|
||||
DefaultSessionCache ss = new DefaultSessionCache(handler);
|
||||
handler.setSessionCache(ss);
|
||||
ss.setSessionDataStore(ds);
|
||||
|
||||
File testDir = MavenTestingUtils.getTargetTestingDir("hashes");
|
||||
FS.ensureEmpty(testDir);
|
||||
ds.setStoreDir(testDir);
|
||||
|
||||
long now = System.currentTimeMillis();
|
||||
|
||||
//create a file for session abc that expired 5sec ago
|
||||
long exp = now -5000L;
|
||||
String name1 = Long.toString(exp)+"__0.0.0.0_abc";
|
||||
File f1 = new File(testDir, name1);
|
||||
if (f1.exists())
|
||||
Assert.assertTrue(f1.delete());
|
||||
f1.createNewFile();
|
||||
|
||||
//create a file for same session that expired 4 sec ago
|
||||
exp = now - 4000L;
|
||||
String name2 = Long.toString(exp)+"__0.0.0.0_abc";
|
||||
File f2 = new File(testDir, name2);
|
||||
if (f2.exists())
|
||||
Assert.assertTrue(f2.delete());
|
||||
f2.createNewFile();
|
||||
|
||||
|
||||
//make a file for same session that expired 3 sec ago
|
||||
exp = now - 3000L;
|
||||
String name3 = Long.toString(exp)+"__0.0.0.0_abc";
|
||||
File f3 = new File(testDir, name3);
|
||||
if (f3.exists())
|
||||
Assert.assertTrue(f3.delete());
|
||||
f3.createNewFile();
|
||||
|
||||
handler.setSessionIdManager(idmgr);
|
||||
handler.start();
|
||||
|
||||
Assert.assertFalse(f1.exists());
|
||||
Assert.assertFalse(f2.exists());
|
||||
Assert.assertTrue(f3.exists());
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@@ -176,25 +283,22 @@ public class FileSessionManagerTest
idmgr.setServer(server);
server.setSessionIdManager(idmgr);

File testDir = MavenTestingUtils.getTargetTestingDir("hashes");
FS.ensureEmpty(testDir);
String expectedFilename = (System.currentTimeMillis()+ 10000)+"__0.0.0.0_validFile123";

Assert.assertTrue(new File(testDir, expectedFilename).createNewFile());
Assert.assertTrue("File should exist!", new File(testDir, expectedFilename).exists());

DefaultSessionCache ss = new DefaultSessionCache(handler);
FileSessionDataStore ds = new FileSessionDataStore();
ss.setSessionDataStore(ds);
handler.setSessionCache(ss);
ds.setDeleteUnrestorableFiles(true); //invalid file will be removed
handler.setSessionIdManager(idmgr);

File testDir = MavenTestingUtils.getTargetTestingDir("hashes");
FS.ensureEmpty(testDir);

ds.setStoreDir(testDir);
handler.start();

String expectedFilename = (System.currentTimeMillis()+ 10000)+"__0.0.0.0_validFile123";

Assert.assertTrue(new File(testDir, expectedFilename).createNewFile());

Assert.assertTrue("File should exist!", new File(testDir, expectedFilename).exists());

Session session = handler.getSession("validFile123");

Assert.assertTrue("File shouldn't exist!", !new File(testDir,expectedFilename).exists());
@@ -315,6 +419,6 @@ public class FileSessionManagerTest
{
nonNumber.delete();
}

}

}

@@ -110,7 +110,7 @@ public class ComplianceViolations2616Test
HttpConfiguration config = new HttpConfiguration();
config.setSendServerVersion(false);

HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(config, HttpCompliance.RFC2616);
HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(config, HttpCompliance.RFC2616_LEGACY);
httpConnectionFactory.setRecordHttpComplianceViolations(true);
connector = new LocalConnector(server, null, null, null, -1, httpConnectionFactory);

@@ -147,7 +147,7 @@ public class ComplianceViolations2616Test

String response = connector.getResponse(req1.toString());
assertThat("Response status", response, containsString("HTTP/1.1 200 OK"));
assertThat("Response headers", response, containsString("X-Http-Violation-0: RFC2616<RFC7230: https://tools.ietf.org/html/rfc7230#section-3.2 No colon"));
assertThat("Response headers", response, containsString("X-Http-Violation-0: Fields must have a Colon"));
assertThat("Response body", response, containsString("[Name] = []"));
}

@@ -164,7 +164,7 @@ public class ComplianceViolations2616Test

String response = connector.getResponse(req1.toString());
assertThat("Response status", response, containsString("HTTP/1.1 200"));
assertThat("Response headers", response, containsString("X-Http-Violation-0: RFC2616<RFC7230: https://tools.ietf.org/html/rfc7230#section-3.2 No colon"));
assertThat("Response headers", response, containsString("X-Http-Violation-0: Fields must have a Colon"));
assertThat("Response body", response, containsString("[Name] = []"));
}

@@ -182,7 +182,8 @@ public class ComplianceViolations2616Test

String response = connector.getResponse(req1.toString());
assertThat("Response status", response, containsString("HTTP/1.1 200"));
assertThat("Response headers", response, containsString("X-Http-Violation-0: RFC2616<RFC7230: https://tools.ietf.org/html/rfc7230#section-3.2.4 folding"));
assertThat("Response headers", response, containsString("X-Http-Violation-0: No line Folding"));
assertThat("Response body", response, containsString("[Name] = [Some Value]"));

}
}

@@ -27,6 +27,7 @@ import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;

import org.eclipse.jetty.util.MultiException;
import org.eclipse.jetty.util.annotation.ManagedObject;
import org.eclipse.jetty.util.annotation.ManagedOperation;
import org.eclipse.jetty.util.log.Log;
@@ -106,6 +107,7 @@ public class ContainerLifeCycle extends AbstractLifeCycle implements Container,
if (!l.isRunning())
start(l);
break;

case AUTO:
if (l.isRunning())
unmanage(b);
@@ -115,6 +117,9 @@
start(l);
}
break;

default:
break;
}
}
}
@@ -154,14 +159,23 @@
super.doStop();
List<Bean> reverse = new ArrayList<>(_beans);
Collections.reverse(reverse);
MultiException mex = new MultiException();
for (Bean b : reverse)
{
if (b._managed==Managed.MANAGED && b._bean instanceof LifeCycle)
{
LifeCycle l = (LifeCycle)b._bean;
stop(l);
try
{
stop(l);
}
catch (Throwable th)
{
mex.add(th);
}
}
}
mex.ifExceptionThrow();
}

/**
@@ -178,7 +192,14 @@
if (b._bean instanceof Destroyable && (b._managed==Managed.MANAGED || b._managed==Managed.POJO))
{
Destroyable d = (Destroyable)b._bean;
d.destroy();
try
{
d.destroy();
}
catch(Throwable th)
{
LOG.warn(th);
}
}
}
_beans.clear();

@@ -101,13 +101,7 @@ public class ScheduledExecutorScheduler extends AbstractLifeCycle implements Sch
{
ScheduledThreadPoolExecutor s = scheduler;
if (s==null)
return new Task(){
@Override
public boolean cancel()
{
return false;
}};

return ()->false;
ScheduledFuture<?> result = s.schedule(task, delay, unit);
return new ScheduledFutureTask(result);
}

@@ -16,10 +16,11 @@
// ========================================================================
//

package org.eclipse.jetty.client;
package org.eclipse.jetty.http.client;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketTimeoutException;
@ -35,49 +36,46 @@ import javax.servlet.ServletException;
|
|||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
import org.eclipse.jetty.client.HttpClient;
|
||||
import org.eclipse.jetty.client.api.Connection;
|
||||
import org.eclipse.jetty.client.api.ContentResponse;
|
||||
import org.eclipse.jetty.client.api.Destination;
|
||||
import org.eclipse.jetty.client.api.Request;
|
||||
import org.eclipse.jetty.client.api.Response;
|
||||
import org.eclipse.jetty.client.api.Result;
|
||||
import org.eclipse.jetty.client.util.BufferingResponseListener;
|
||||
import org.eclipse.jetty.client.util.InputStreamContentProvider;
|
||||
import org.eclipse.jetty.http.HttpStatus;
|
||||
import org.eclipse.jetty.io.ByteBufferPool;
|
||||
import org.eclipse.jetty.io.ClientConnectionFactory;
|
||||
import org.eclipse.jetty.io.EndPoint;
|
||||
import org.eclipse.jetty.io.ssl.SslClientConnectionFactory;
|
||||
import org.eclipse.jetty.io.ssl.SslConnection;
|
||||
import org.eclipse.jetty.server.handler.AbstractHandler;
|
||||
import org.eclipse.jetty.toolchain.test.annotation.Slow;
|
||||
import org.eclipse.jetty.util.FuturePromise;
|
||||
import org.eclipse.jetty.util.IO;
|
||||
import org.eclipse.jetty.util.ssl.SslContextFactory;
|
||||
import org.hamcrest.Matchers;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Assume;
|
||||
import org.junit.Test;
|
||||
|
||||
public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
|
||||
public class HttpClientTimeoutTest extends AbstractTest
|
||||
{
|
||||
public HttpClientTimeoutTest(SslContextFactory sslContextFactory)
|
||||
public HttpClientTimeoutTest(Transport transport)
|
||||
{
|
||||
super(sslContextFactory);
|
||||
super(transport);
|
||||
}
|
||||
|
||||
@Slow
|
||||
@Test(expected = TimeoutException.class)
|
||||
public void testTimeoutOnFuture() throws Exception
|
||||
{
|
||||
long timeout = 1000;
|
||||
start(new TimeoutHandler(2 * timeout));
|
||||
|
||||
client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
client.newRequest(newURI())
|
||||
.timeout(timeout, TimeUnit.MILLISECONDS)
|
||||
.send();
|
||||
}
|
||||
|
||||
@Slow
|
||||
@Test
|
||||
public void testTimeoutOnListener() throws Exception
|
||||
{
|
||||
|
@ -85,22 +83,16 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
|
|||
start(new TimeoutHandler(2 * timeout));
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
Request request = client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
Request request = client.newRequest(newURI())
|
||||
.timeout(timeout, TimeUnit.MILLISECONDS);
|
||||
request.send(new Response.CompleteListener()
|
||||
request.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
}
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
});
|
||||
Assert.assertTrue(latch.await(3 * timeout, TimeUnit.MILLISECONDS));
|
||||
}
|
||||
|
||||
@Slow
|
||||
@Test
|
||||
public void testTimeoutOnQueuedRequest() throws Exception
|
||||
{
|
||||
|
@ -112,32 +104,22 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
|
|||
|
||||
// The first request has a long timeout
|
||||
final CountDownLatch firstLatch = new CountDownLatch(1);
|
||||
Request request = client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
Request request = client.newRequest(newURI())
|
||||
.timeout(4 * timeout, TimeUnit.MILLISECONDS);
|
||||
request.send(new Response.CompleteListener()
|
||||
request.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertFalse(result.isFailed());
|
||||
firstLatch.countDown();
|
||||
}
|
||||
Assert.assertFalse(result.isFailed());
|
||||
firstLatch.countDown();
|
||||
});
|
||||
|
||||
// Second request has a short timeout and should fail in the queue
|
||||
final CountDownLatch secondLatch = new CountDownLatch(1);
|
||||
request = client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
request = client.newRequest(newURI())
|
||||
.timeout(timeout, TimeUnit.MILLISECONDS);
|
||||
request.send(new Response.CompleteListener()
|
||||
request.send(result ->
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertTrue(result.isFailed());
|
||||
secondLatch.countDown();
|
||||
}
|
||||
Assert.assertTrue(result.isFailed());
|
||||
secondLatch.countDown();
|
||||
});
|
||||
|
||||
Assert.assertTrue(secondLatch.await(2 * timeout, TimeUnit.MILLISECONDS));
|
||||
|
@ -146,7 +128,6 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
|
|||
Assert.assertTrue(firstLatch.await(5 * timeout, TimeUnit.MILLISECONDS));
|
||||
}
|
||||
|
||||
@Slow
|
||||
@Test
|
||||
public void testTimeoutIsCancelledOnSuccess() throws Exception
|
||||
{
|
||||
|
@ -155,8 +136,7 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
|
|||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
final byte[] content = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
|
||||
Request request = client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
Request request = client.newRequest(newURI())
|
||||
.content(new InputStreamContentProvider(new ByteArrayInputStream(content)))
|
||||
.timeout(2 * timeout, TimeUnit.MILLISECONDS);
|
||||
request.send(new BufferingResponseListener()
|
||||
|
@ -177,7 +157,6 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
|
|||
Assert.assertNull(request.getAbortCause());
|
||||
}
|
||||
|
||||
@Slow
|
||||
@Test
|
||||
public void testTimeoutOnListenerWithExplicitConnection() throws Exception
|
||||
{
|
||||
|
@ -185,29 +164,23 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
|
|||
start(new TimeoutHandler(2 * timeout));
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
Destination destination = client.getDestination(scheme, "localhost", connector.getLocalPort());
|
||||
Destination destination = client.getDestination(getScheme(), "localhost", connector.getLocalPort());
|
||||
FuturePromise<Connection> futureConnection = new FuturePromise<>();
|
||||
destination.newConnection(futureConnection);
|
||||
try (Connection connection = futureConnection.get(5, TimeUnit.SECONDS))
|
||||
{
|
||||
Request request = client.newRequest("localhost", connector.getLocalPort())
|
||||
.scheme(scheme)
|
||||
Request request = client.newRequest(newURI())
|
||||
.timeout(timeout, TimeUnit.MILLISECONDS);
|
||||
connection.send(request, new Response.CompleteListener()
|
||||
connection.send(request, result ->
|
||||
{
|
||||
@Override
|
||||
public void onComplete(Result result)
|
||||
{
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
}
|
||||
Assert.assertTrue(result.isFailed());
|
||||
latch.countDown();
|
||||
});
|
||||
|
||||
Assert.assertTrue(latch.await(3 * timeout, TimeUnit.MILLISECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
@Slow
|
||||
@Test
|
||||
public void testTimeoutIsCancelledOnSuccessWithExplicitConnection() throws Exception
|
||||
{
|
||||
|
@ -215,24 +188,19 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
start(new TimeoutHandler(timeout));

final CountDownLatch latch = new CountDownLatch(1);
Destination destination = client.getDestination(scheme, "localhost", connector.getLocalPort());
Destination destination = client.getDestination(getScheme(), "localhost", connector.getLocalPort());
FuturePromise<Connection> futureConnection = new FuturePromise<>();
destination.newConnection(futureConnection);
try (Connection connection = futureConnection.get(5, TimeUnit.SECONDS))
{
Request request = client.newRequest(destination.getHost(), destination.getPort())
.scheme(scheme)
Request request = client.newRequest(newURI())
.timeout(2 * timeout, TimeUnit.MILLISECONDS);
connection.send(request, new Response.CompleteListener()
connection.send(request, result ->
{
@Override
public void onComplete(Result result)
{
Response response = result.getResponse();
Assert.assertEquals(200, response.getStatus());
Assert.assertFalse(result.isFailed());
latch.countDown();
}
Response response = result.getResponse();
Assert.assertEquals(200, response.getStatus());
Assert.assertFalse(result.isFailed());
latch.countDown();
});

Assert.assertTrue(latch.await(3 * timeout, TimeUnit.MILLISECONDS));

@ -243,14 +211,14 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
}
}

@Test
@Test(expected = TimeoutException.class)
public void testIdleTimeout() throws Throwable
{
long timeout = 1000;
start(new TimeoutHandler(2 * timeout));
client.stop();
final AtomicBoolean sslIdle = new AtomicBoolean();
client = new HttpClient(sslContextFactory)
startServer(new TimeoutHandler(2 * timeout));

AtomicBoolean sslIdle = new AtomicBoolean();
client = new HttpClient(provideClientTransport(transport), sslContextFactory)
{
@Override
public ClientConnectionFactory newSslClientConnectionFactory(ClientConnectionFactory connectionFactory)
@ -278,26 +246,23 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest

try
{
client.newRequest("localhost", connector.getLocalPort())
.scheme(scheme)
client.newRequest(newURI())
.send();
Assert.fail();
}
catch (Exception x)
{
Assert.assertFalse(sslIdle.get());
Assert.assertThat(x.getCause(), Matchers.instanceOf(TimeoutException.class));
throw x;
}
}

@Slow
@Test
public void testBlockingConnectTimeoutFailsRequest() throws Exception
{
testConnectTimeoutFailsRequest(true);
}

@Slow
@Test
public void testNonBlockingConnectTimeoutFailsRequest() throws Exception
{

@ -319,22 +284,17 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest

final CountDownLatch latch = new CountDownLatch(1);
Request request = client.newRequest(host, port);
request.scheme(scheme)
.send(new Response.CompleteListener()
request.scheme(getScheme())
.send(result ->
{
@Override
public void onComplete(Result result)
{
if (result.isFailed())
latch.countDown();
}
if (result.isFailed())
latch.countDown();
});

Assert.assertTrue(latch.await(2 * connectTimeout, TimeUnit.MILLISECONDS));
Assert.assertNotNull(request.getAbortCause());
}

@Slow
@Test
public void testConnectTimeoutIsCancelledByShorterRequestTimeout() throws Exception
{
@ -351,16 +311,12 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
final AtomicInteger completes = new AtomicInteger();
final CountDownLatch latch = new CountDownLatch(2);
Request request = client.newRequest(host, port);
request.scheme(scheme)
request.scheme(getScheme())
.timeout(connectTimeout / 2, TimeUnit.MILLISECONDS)
.send(new Response.CompleteListener()
.send(result ->
{
@Override
public void onComplete(Result result)
{
completes.incrementAndGet();
latch.countDown();
}
completes.incrementAndGet();
latch.countDown();
});

Assert.assertFalse(latch.await(2 * connectTimeout, TimeUnit.MILLISECONDS));

@ -383,27 +339,19 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest

final CountDownLatch latch = new CountDownLatch(1);
Request request = client.newRequest(host, port);
request.scheme(scheme)
.send(new Response.CompleteListener()
request.scheme(getScheme())
.send(result ->
{
@Override
public void onComplete(Result result)
if (result.isFailed())
{
if (result.isFailed())
{
// Retry
client.newRequest(host, port)
.scheme(scheme)
.send(new Response.CompleteListener()
{
@Override
public void onComplete(Result result)
{
if (result.isFailed())
latch.countDown();
}
});
}
// Retry
client.newRequest(host, port)
.scheme(getScheme())
.send(retryResult ->
{
if (retryResult.isFailed())
latch.countDown();
});
}
});

@ -417,17 +365,9 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
start(new EmptyServerHandler());

final CountDownLatch latch = new CountDownLatch(1);
client.newRequest("localhost", connector.getLocalPort())
.scheme(scheme)
client.newRequest(newURI())
.timeout(1, TimeUnit.MILLISECONDS) // Very short timeout
.send(new Response.CompleteListener()
{
@Override
public void onComplete(Result result)
{
latch.countDown();
}
});
.send(result -> latch.countDown());

Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
}

@ -443,16 +383,10 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
try
{
request.timeout(timeout, TimeUnit.MILLISECONDS)
.send(new Response.CompleteListener()
{
@Override
public void onComplete(Result result)
{
}
});
.send(result -> {});
Assert.fail();
}
catch (Exception expected)
catch (Exception ignored)
{
}

@ -462,7 +396,49 @@ public class HttpClientTimeoutTest extends AbstractHttpClientServerTest
Assert.assertNull(request.getAbortCause());
}

private void assumeConnectTimeout(String host, int port, int connectTimeout) throws IOException
@Test
public void testFirstRequestTimeoutAfterSecondRequestCompletes() throws Exception
{
long timeout = 2000;
start(new EmptyServerHandler()
{
@Override
protected void service(String target, org.eclipse.jetty.server.Request jettyRequest, HttpServletRequest request, HttpServletResponse response) throws IOException
{
if (request.getRequestURI().startsWith("/one"))
{
try
{
Thread.sleep(3 * timeout);
}
catch (InterruptedException x)
{
throw new InterruptedIOException();
}
}
}
});

CountDownLatch latch = new CountDownLatch(1);
client.newRequest(newURI())
.path("/one")
.timeout(2 * timeout, TimeUnit.MILLISECONDS)
.send(result ->
{
if (result.isFailed() && result.getFailure() instanceof TimeoutException)
latch.countDown();
});

ContentResponse response = client.newRequest(newURI())
.path("/two")
.timeout(timeout, TimeUnit.MILLISECONDS)
.send();

Assert.assertEquals(HttpStatus.OK_200, response.getStatus());
Assert.assertTrue(latch.await(5, TimeUnit.SECONDS));
}

private void assumeConnectTimeout(String host, int port, int connectTimeout)
{
try (Socket socket = new Socket())
{
@ -24,6 +24,8 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

import org.eclipse.jetty.util.IO;

@ -87,12 +89,13 @@ public class FileTestHelper
break;
}
}

if (fname != null)
return new File (_tmpDir, fname);
return null;
}

public static void assertFileExists (String sessionId, boolean exists)
public static void assertSessionExists (String sessionId, boolean exists)
{
assertNotNull(_tmpDir);
assertTrue(_tmpDir.exists());

@ -115,6 +118,29 @@ public class FileTestHelper
assertFalse(found);
}

public static void assertFileExists (String filename, boolean exists)
{
assertNotNull(_tmpDir);
assertTrue(_tmpDir.exists());
File file = new File (_tmpDir, filename);
if (exists)
assertTrue(file.exists());
else
assertFalse(file.exists());
}

public static void createFile (String filename)
throws IOException
{
assertNotNull(_tmpDir);
assertTrue(_tmpDir.exists());

File file = new File (_tmpDir, filename);
Files.deleteIfExists(file.toPath());
file.createNewFile();
}

public static void deleteFile (String sessionId)
{
@ -47,7 +47,7 @@ public class NonClusteredSessionScavengingTest extends AbstractNonClusteredSessi
@Override
public void assertSession(String id, boolean exists)
{
FileTestHelper.assertFileExists(id, exists);
FileTestHelper.assertSessionExists(id, exists);
}

@ -20,10 +20,12 @@
package org.eclipse.jetty.server.session;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;

@ -66,11 +68,91 @@ public class TestFileSessions extends AbstractTestBase
{
return FileTestHelper.newSessionDataStoreFactory();
}

@Test
public void testSweep () throws Exception
{
int scavengePeriod = 2;
String contextPath = "/test";
String servletMapping = "/server";
int inactivePeriod = 5;
int gracePeriod = 10;
DefaultSessionCacheFactory cacheFactory = new DefaultSessionCacheFactory();
cacheFactory.setEvictionPolicy(SessionCache.NEVER_EVICT);
FileSessionDataStoreFactory storeFactory = (FileSessionDataStoreFactory)createSessionDataStoreFactory();
storeFactory.setGracePeriodSec(gracePeriod);
TestServer server1 = new TestServer(0, inactivePeriod, scavengePeriod, cacheFactory, storeFactory);
server1.addContext(contextPath).addServlet(TestServlet.class, servletMapping);

try
{
server1.start();

//create file not for our context that expired long ago and should be removed by sweep
FileTestHelper.createFile("101_foobar_0.0.0.0_sessiona");
FileTestHelper.assertSessionExists("sessiona", true);

//create a file not for our context that is not expired and should be ignored
String nonExpiredForeign = (System.currentTimeMillis()+TimeUnit.DAYS.toMillis(1))+"_foobar_0.0.0.0_sessionb";
FileTestHelper.createFile(nonExpiredForeign);
FileTestHelper.assertFileExists(nonExpiredForeign, true);

//create a file not for our context that is recently expired, and thus ignored by sweep
String expiredForeign = (System.currentTimeMillis()-TimeUnit.SECONDS.toMillis(1))+"_foobar_0.0.0.0_sessionc";
FileTestHelper.createFile(expiredForeign);
FileTestHelper.assertFileExists(expiredForeign, true);

//create a file that is not a session file, it should be ignored
FileTestHelper.createFile("whatever.txt");
FileTestHelper.assertFileExists("whatever.txt", true);

//create a file that is a non-expired session file for our context that should be ignored
String nonExpired = (System.currentTimeMillis()+TimeUnit.DAYS.toMillis(1))+"_test_0.0.0.0_sessionb";
FileTestHelper.createFile(nonExpired);
FileTestHelper.assertFileExists(nonExpired, true);

//create a file that is a never-expire session file for our context that should be ignored
String neverExpired = "0_test_0.0.0.0_sessionc";
FileTestHelper.createFile(neverExpired);
FileTestHelper.assertFileExists(neverExpired, true);

//create a file that is a never-expire session file for another context that should be ignored
String foreignNeverExpired = "0_test_0.0.0.0_sessionc";
FileTestHelper.createFile(foreignNeverExpired);
FileTestHelper.assertFileExists(foreignNeverExpired, true);

//need to wait to ensure scavenge runs so sweeper runs
Thread.currentThread().sleep(2000L*scavengePeriod);
FileTestHelper.assertSessionExists("sessiona", false);
FileTestHelper.assertFileExists("whatever.txt", true);
FileTestHelper.assertFileExists(nonExpired, true);
FileTestHelper.assertFileExists(nonExpiredForeign, true);
FileTestHelper.assertFileExists(expiredForeign, true);
FileTestHelper.assertFileExists(neverExpired, true);
FileTestHelper.assertFileExists(foreignNeverExpired, true);
}
finally
{
server1.stop();
}
}

@Test
public void test () throws Exception
{
String contextPath = "";
String contextPath = "/test";
String servletMapping = "/server";
int inactivePeriod = 5;
DefaultSessionCacheFactory cacheFactory = new DefaultSessionCacheFactory();
@ -97,26 +179,37 @@ public class TestFileSessions extends AbstractTestBase
sessionCookie = sessionCookie.replaceFirst("(\\W)(P|p)ath=", "$1\\$Path=");

//check that the file for the session exists after creating the session
FileTestHelper.assertFileExists(TestServer.extractSessionId(sessionCookie), true);
FileTestHelper.assertSessionExists(TestServer.extractSessionId(sessionCookie), true);
File file1 = FileTestHelper.getFile(TestServer.extractSessionId(sessionCookie));

//request the session and check that the file for the session exists with an updated lastmodify
//request the session and check that the file for the session was changed
Request request = client.newRequest("http://localhost:" + port1 + contextPath + servletMapping + "?action=check");
request.header("Cookie", sessionCookie);
ContentResponse response2 = request.send();
assertEquals(HttpServletResponse.SC_OK,response2.getStatus());
FileTestHelper.assertFileExists(TestServer.extractSessionId(sessionCookie), true);
FileTestHelper.assertSessionExists(TestServer.extractSessionId(sessionCookie), true);
File file2 = FileTestHelper.getFile(TestServer.extractSessionId(sessionCookie));
assertTrue (!file1.equals(file2));
assertTrue (file2.lastModified() > file1.lastModified());

assertFalse (file1.exists());
assertTrue(file2.exists());

//check expiry time in filename changed
String tmp = file1.getName();
tmp = tmp.substring(0, tmp.indexOf("_"));

long f1 = Long.valueOf(tmp);
tmp = file2.getName();
tmp = tmp.substring(0, tmp.indexOf("_"));
long f2 = Long.valueOf(tmp);
assertTrue (f2>f1);

//invalidate the session and verify that the session file is deleted
request = client.newRequest("http://localhost:" + port1 + contextPath + servletMapping + "?action=remove");
request.header("Cookie", sessionCookie);
response2 = request.send();
assertEquals(HttpServletResponse.SC_OK,response2.getStatus());
FileTestHelper.assertFileExists(TestServer.extractSessionId(sessionCookie), false);
FileTestHelper.assertSessionExists(TestServer.extractSessionId(sessionCookie), false);

//make another session
response1 = client.GET("http://localhost:" + port1 + contextPath + servletMapping + "?action=init");
@ -124,11 +217,11 @@ public class TestFileSessions extends AbstractTestBase
sessionCookie = response1.getHeaders().get("Set-Cookie");
assertTrue(sessionCookie != null);
sessionCookie = sessionCookie.replaceFirst("(\\W)(P|p)ath=", "$1\\$Path=");
FileTestHelper.assertFileExists(TestServer.extractSessionId(sessionCookie), true);
FileTestHelper.assertSessionExists(TestServer.extractSessionId(sessionCookie), true);

//wait for it to be scavenged
Thread.currentThread().sleep((inactivePeriod + 2)*1000);
FileTestHelper.assertFileExists(TestServer.extractSessionId(sessionCookie), false);
FileTestHelper.assertSessionExists(TestServer.extractSessionId(sessionCookie), false);

}
finally
@ -157,11 +250,12 @@ public class TestFileSessions extends AbstractTestBase
{
HttpSession session = request.getSession(false);
session.invalidate();
//assertTrue(session == null);
}
else if ("check".equals(action))
{
HttpSession session = request.getSession(false);
assertTrue(session != null);
try {Thread.currentThread().sleep(1);}catch (Exception e) {e.printStackTrace();}
}
}
}