HBASE-17809 cleanup unused class

parent f2d1b8db89
commit fe3c32ebd5

@@ -65,14 +65,6 @@
     <Bug pattern="INT_VACUOUS_COMPARISON"/>
   </Match>
-  <Match>
-    <Class name="org.apache.hadoop.hbase.regionserver.LruHashMap"/>
-    <Or>
-      <Method name="equals"/>
-    </Or>
-    <Bug pattern="EQ_UNUSUAL"/>
-  </Match>
   <Match>
     <Class name="org.apache.hadoop.hbase.util.ByteBufferUtils"/>
     <Or>

Deleted file: org.apache.hadoop.hbase.client.DelegatingRetryingCallable
@@ -1,64 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Helper callable for internal use when you just want to override a single method of a {@link
 * RetryingCallable}. By default, this just delegates all {@link RetryingCallable} methods to the
 * specified delegate.
 * @param <T> Result class from calls to the delegate {@link RetryingCallable}
 * @param <D> Type of the delegate class
 */
@InterfaceAudience.Private
public class DelegatingRetryingCallable<T, D extends RetryingCallable<T>> implements
    RetryingCallable<T> {
  protected final D delegate;

  public DelegatingRetryingCallable(D delegate) {
    this.delegate = delegate;
  }

  @Override
  public T call(int callTimeout) throws Exception {
    return delegate.call(callTimeout);
  }

  @Override
  public void prepare(boolean reload) throws IOException {
    delegate.prepare(reload);
  }

  @Override
  public void throwable(Throwable t, boolean retrying) {
    delegate.throwable(t, retrying);
  }

  @Override
  public String getExceptionMessageAdditionalDetail() {
    return delegate.getExceptionMessageAdditionalDetail();
  }

  @Override
  public long sleep(long pause, int tries) {
    return delegate.sleep(pause, tries);
  }
}

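For context on how the removed delegating callable was meant to be used, here is a minimal sketch (not part of this commit; the subclass name is hypothetical): override only sleep() and leave every other RetryingCallable method delegated to the wrapped instance.

import org.apache.hadoop.hbase.client.DelegatingRetryingCallable;
import org.apache.hadoop.hbase.client.RetryingCallable;

// Hypothetical subclass: disable backoff while keeping call()/prepare()/throwable()
// delegated to the wrapped callable.
public class NoBackoffCallable<T> extends DelegatingRetryingCallable<T, RetryingCallable<T>> {
  public NoBackoffCallable(RetryingCallable<T> delegate) {
    super(delegate);
  }

  @Override
  public long sleep(long pause, int tries) {
    return 0; // retry immediately instead of using the delegate's pause
  }
}
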
Deleted file: org.apache.hadoop.hbase.client.ScannerTimeoutException
@@ -1,44 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

/**
 * Thrown when a scanner has timed out.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ScannerTimeoutException extends DoNotRetryIOException {

  private static final long serialVersionUID = 8788838690290688313L;

  /** default constructor */
  ScannerTimeoutException() {
    super();
  }

  /** @param s */
  ScannerTimeoutException(String s) {
    super(s);
  }
}

Deleted file: org.apache.hadoop.hbase.exceptions.LockTimeoutException
@@ -1,43 +0,0 @@
/*
 * Copyright The Apache Software Foundation
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase.exceptions;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.DoNotRetryIOException;

/**
 * Thrown when there is a timeout when trying to acquire a lock
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LockTimeoutException extends DoNotRetryIOException {

  private static final long serialVersionUID = -1770764924258999825L;

  /** Default constructor */
  public LockTimeoutException() {
    super();
  }

  public LockTimeoutException(String s) {
    super(s);
  }
}

Deleted file: org.apache.hadoop.hbase.exceptions.OperationConflictException
@@ -1,49 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase.exceptions;

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

/**
 * The exception that is thrown if there's duplicate execution of non-idempotent operation.
 * Client should not retry; may use "get" to get the desired value.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class OperationConflictException extends DoNotRetryIOException {
  private static final long serialVersionUID = -8930333627489862872L;

  public OperationConflictException() {
    super();
  }

  public OperationConflictException(String message) {
    super(message);
  }

  public OperationConflictException(Throwable cause) {
    super(cause);
  }

  public OperationConflictException(String message, Throwable cause) {
    super(message, cause);
  }
}

Deleted file: org.apache.hadoop.hbase.quotas.InvalidQuotaSettingsException
@@ -1,32 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */

package org.apache.hadoop.hbase.quotas;

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Generic quota exceeded exception for invalid settings
 */
@InterfaceAudience.Private
public class InvalidQuotaSettingsException extends DoNotRetryIOException {
  public InvalidQuotaSettingsException(String msg) {
    super(msg);
  }
}

Deleted file: org.apache.hadoop.hbase.ShareableMemory
@@ -1,39 +0,0 @@
/*
 * Copyright The Apache Software Foundation
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * A cell implementing this interface would mean that the memory area backing this cell will refer
 * to a memory area that could be part of a larger common memory area used by the RegionServer. This
 * might be the bigger memory chunk where the RPC requests are read into. If an exclusive instance
 * is required, use the {@link #cloneToCell()} to have the contents of the cell copied to an
 * exclusive memory area.
 */
@InterfaceAudience.Private
public interface ShareableMemory {

  /**
   * Does a deep copy of the contents to a new memory area and returns it in the form of a cell.
   * @return The deep cloned cell
   */
  Cell cloneToCell();
}

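To illustrate the contract the removed interface described, a hedged sketch follows (the helper class is hypothetical; only the Cell and ShareableMemory types above are assumed): copy a cell out of a shared memory area before holding on to it.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ShareableMemory;

// Sketch: detach a Cell from any shared RPC buffer before caching it elsewhere.
public final class CellDetachSketch {
  private CellDetachSketch() {
  }

  public static Cell detach(Cell cell) {
    if (cell instanceof ShareableMemory) {
      // deep copy into an exclusive memory area, per the interface javadoc
      return ((ShareableMemory) cell).cloneToCell();
    }
    return cell; // already backed by its own memory
  }
}
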
Deleted file: org.apache.hadoop.hbase.util.BoundedArrayQueue
@@ -1,81 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase.util;

import java.util.AbstractQueue;
import java.util.Iterator;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * A bounded non-thread safe implementation of {@link java.util.Queue}.
 */
@InterfaceAudience.Private
public class BoundedArrayQueue<E> extends AbstractQueue<E> {

  private Object[] items;
  private int takeIndex, putIndex;
  private int count;

  public BoundedArrayQueue(int maxElements) {
    items = new Object[maxElements];
  }

  @Override
  public int size() {
    return count;
  }

  /**
   * Not implemented and will throw {@link UnsupportedOperationException}
   */
  @Override
  public Iterator<E> iterator() {
    // We don't need this. Leaving it as not implemented.
    throw new UnsupportedOperationException();
  }

  @Override
  public boolean offer(E e) {
    if (count == items.length) return false;
    items[putIndex] = e;
    if (++putIndex == items.length) putIndex = 0;
    count++;
    return true;
  }

  @Override
  public E poll() {
    return (count == 0) ? null : dequeue();
  }

  @SuppressWarnings("unchecked")
  private E dequeue() {
    E x = (E) items[takeIndex];
    items[takeIndex] = null;
    if (++takeIndex == items.length) takeIndex = 0;
    count--;
    return x;
  }

  @SuppressWarnings("unchecked")
  @Override
  public E peek() {
    return (E) items[takeIndex];
  }
}

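A short usage sketch of the removed queue (single-threaded by design; the pool class and sizes here are illustrative only): offer() refuses new elements once the fixed capacity is reached, and poll() returns null when the queue is empty.

import org.apache.hadoop.hbase.util.BoundedArrayQueue;

// Sketch: a tiny single-threaded buffer pool on top of BoundedArrayQueue.
public class BufferPoolSketch {
  private final BoundedArrayQueue<byte[]> pool = new BoundedArrayQueue<>(16);

  public byte[] take() {
    byte[] buf = pool.poll();          // null when the pool is empty
    return buf != null ? buf : new byte[4096];
  }

  public void giveBack(byte[] buf) {
    pool.offer(buf);                   // returns false (drops buf) once 16 are pooled
  }
}
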
Deleted file: org.apache.hadoop.hbase.util.ChecksumFactory
@@ -1,99 +0,0 @@
/*
 * Copyright The Apache Software Foundation
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase.util;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.zip.Checksum;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Utility class that is used to generate a Checksum object.
 * The Checksum implementation is pluggable and an application
 * can specify their own class that implements their own
 * Checksum algorithm.
 */
@InterfaceAudience.Private
public class ChecksumFactory {

  static private final Class<?>[] EMPTY_ARRAY = new Class[]{};

  /**
   * Create a new instance of a Checksum object.
   * @return The newly created Checksum object
   */
  static public Checksum newInstance(String className) throws IOException {
    try {
      Class<?> clazz = getClassByName(className);
      return (Checksum)newInstance(clazz);
    } catch (ClassNotFoundException e) {
      throw new IOException(e);
    }
  }

  /**
   * Returns a Constructor that can be used to create a Checksum object.
   * @param className classname for which an constructor is created
   * @return a new Constructor object
   */
  static public Constructor<?> newConstructor(String className)
    throws IOException {
    try {
      Class<?> clazz = getClassByName(className);
      Constructor<?> ctor = clazz.getDeclaredConstructor(EMPTY_ARRAY);
      ctor.setAccessible(true);
      return ctor;
    } catch (ClassNotFoundException e) {
      throw new IOException(e);
    } catch (java.lang.NoSuchMethodException e) {
      throw new IOException(e);
    }
  }

  /** Create an object for the given class and initialize it from conf
   *
   * @param theClass class of which an object is created
   * @return a new object
   */
  static private <T> T newInstance(Class<T> theClass) {
    T result;
    try {
      Constructor<T> ctor = theClass.getDeclaredConstructor(EMPTY_ARRAY);
      ctor.setAccessible(true);
      result = ctor.newInstance();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return result;
  }

  /**
   * Load a class by name.
   * @param name the class name.
   * @return the class object.
   * @throws ClassNotFoundException if the class is not found.
   */
  static private Class<?> getClassByName(String name)
    throws ClassNotFoundException {
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    return Class.forName(name, true, classLoader);
  }
}

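As a hedged illustration of how the removed factory could be called (the CRC32 class name and helper below are examples, not taken from this commit), newInstance() reflectively builds any java.util.zip.Checksum implementation given its class name.

import java.io.IOException;
import java.util.zip.Checksum;

import org.apache.hadoop.hbase.util.ChecksumFactory;

// Sketch: compute a checksum with a pluggable implementation chosen by name.
public class ChecksumSketch {
  public static long crcOf(byte[] data) throws IOException {
    Checksum crc = ChecksumFactory.newInstance("java.util.zip.CRC32");
    crc.update(data, 0, data.length);
    return crc.getValue();
  }
}
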
Deleted file: org.apache.hadoop.hbase.util.TestBoundedArrayQueue
@@ -1,60 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ MiscTests.class, SmallTests.class })
public class TestBoundedArrayQueue {

  private int qMaxElements = 5;
  private BoundedArrayQueue<Integer> queue = new BoundedArrayQueue<>(qMaxElements);

  @Test
  public void testBoundedArrayQueueOperations() throws Exception {
    assertEquals(0, queue.size());
    assertNull(queue.poll());
    assertNull(queue.peek());
    for (int i = 0; i < qMaxElements; i++) {
      assertTrue(queue.offer(i));
    }
    assertEquals(qMaxElements, queue.size());
    assertFalse(queue.offer(0));
    assertEquals(0, queue.peek().intValue());
    assertEquals(0, queue.peek().intValue());
    for (int i = 0; i < qMaxElements; i++) {
      assertEquals(i, queue.poll().intValue());
    }
    assertEquals(0, queue.size());
    assertNull(queue.poll());
    // Write after one cycle is over
    assertTrue(queue.offer(100));
    assertTrue(queue.offer(1000));
    assertEquals(100, queue.peek().intValue());
    assertEquals(100, queue.poll().intValue());
    assertEquals(1000, queue.poll().intValue());
  }
}

Deleted file: org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenDepthComparator
@@ -1,64 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */

package org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize;

import java.util.Comparator;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Determines order of nodes in the output array. Maybe possible to optimize further.
 */
@InterfaceAudience.Private
public class TokenDepthComparator implements Comparator<TokenizerNode> {

  @Override
  public int compare(TokenizerNode a, TokenizerNode b) {
    if (a == null) {
      throw new IllegalArgumentException("a cannot be null");
    }
    if (b == null) {
      throw new IllegalArgumentException("b cannot be null");
    }

    // put leaves at the end
    if (!a.isLeaf() && b.isLeaf()) {
      return -1;
    }
    if (a.isLeaf() && !b.isLeaf()) {
      return 1;
    }

    if (a.isLeaf() && b.isLeaf()) {// keep leaves in sorted order (for debugability)
      return a.getId() < b.getId() ? -1 : 1;
    }

    // compare depth
    if (a.getTokenOffset() < b.getTokenOffset()) {
      return -1;
    }
    if (a.getTokenOffset() > b.getTokenOffset()) {
      return 1;
    }

    // if same depth, return lower id first. ids are unique
    return a.getId() < b.getId() ? -1 : 1;
  }

}

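For reference, the removed comparator was simply meant to be handed to a sort over tokenizer nodes; a minimal sketch follows (the helper class is hypothetical and assumes an already-built List of TokenizerNode instances).

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenDepthComparator;
import org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode;

// Sketch: order nodes by token depth, with leaves pushed to the end of the output array.
public class NodeOrderingSketch {
  public static void orderForOutput(List<TokenizerNode> nodes) {
    Collections.sort(nodes, new TokenDepthComparator());
  }
}
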
File diff suppressed because it is too large
Deleted file: org.apache.hadoop.hbase.regionserver.RegionMergeTransaction
@@ -1,248 +0,0 @@
/*
 * Copyright The Apache Software Foundation
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.security.User;

/**
 * Executes region merge as a "transaction". It is similar with
 * SplitTransaction. Call {@link #prepare(RegionServerServices)} to setup the
 * transaction, {@link #execute(Server, RegionServerServices)} to run the
 * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if
 * execute fails.
 *
 * <p>Here is an example of how you would use this interface:
 * <pre>
 *  RegionMergeTransactionFactory factory = new RegionMergeTransactionFactory(conf);
 *  RegionMergeTransaction mt = factory.create(parent, midKey)
 *    .registerTransactionListener(new TransactionListener() {
 *       public void transition(RegionMergeTransaction transaction,
 *         RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) throws IOException {
 *         // ...
 *       }
 *       public void rollback(RegionMergeTransaction transaction,
 *         RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) {
 *         // ...
 *       }
 *    });
 *  if (!mt.prepare()) return;
 *  try {
 *    mt.execute(server, services);
 *  } catch (IOException ioe) {
 *    try {
 *      mt.rollback(server, services);
 *      return;
 *    } catch (RuntimeException e) {
 *      // abort the server
 *    }
 *  }
 * </Pre>
 * <p>A merge transaction is not thread safe. Callers must ensure a split is run by
 * one thread only.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface RegionMergeTransaction {
  /**
   * Each enum is a step in the merge transaction.
   */
  enum RegionMergeTransactionPhase {
    STARTED,
    /**
     * Prepared
     */
    PREPARED,
    /**
     * Set region as in transition, set it into MERGING state.
     */
    SET_MERGING,
    /**
     * We created the temporary merge data directory.
     */
    CREATED_MERGE_DIR,
    /**
     * Closed the merging region A.
     */
    CLOSED_REGION_A,
    /**
     * The merging region A has been taken out of the server's online regions list.
     */
    OFFLINED_REGION_A,
    /**
     * Closed the merging region B.
     */
    CLOSED_REGION_B,
    /**
     * The merging region B has been taken out of the server's online regions list.
     */
    OFFLINED_REGION_B,
    /**
     * Started in on creation of the merged region.
     */
    STARTED_MERGED_REGION_CREATION,
    /**
     * Point of no return. If we got here, then transaction is not recoverable
     * other than by crashing out the regionserver.
     */
    PONR,
    /**
     * Completed
     */
    COMPLETED
  }

  /**
   * Split transaction journal entry
   */
  public interface JournalEntry {

    /** @return the completed phase marked by this journal entry */
    RegionMergeTransactionPhase getPhase();

    /** @return the time of phase completion */
    long getTimeStamp();
  }

  /**
   * Split transaction listener
   */
  public interface TransactionListener {

    /**
     * Invoked when transitioning forward from one transaction phase to another
     * @param transaction the transaction
     * @param from the current phase
     * @param to the next phase
     * @throws IOException listener can throw this to abort
     */
    void transition(RegionMergeTransaction transaction, RegionMergeTransactionPhase from,
        RegionMergeTransactionPhase to) throws IOException;

    /**
     * Invoked when rolling back a transaction from one transaction phase to the
     * previous
     * @param transaction the transaction
     * @param from the current phase
     * @param to the previous phase
     */
    void rollback(RegionMergeTransaction transaction, RegionMergeTransactionPhase from,
        RegionMergeTransactionPhase to);
  }

  /**
   * Check merge inputs and prepare the transaction.
   * @param services
   * @return <code>true</code> if the regions are mergeable else
   *         <code>false</code> if they are not (e.g. its already closed, etc.).
   * @throws IOException
   */
  boolean prepare(RegionServerServices services) throws IOException;

  /**
   * Run the transaction.
   * @param server Hosting server instance. Can be null when testing
   * @param services Used to online/offline regions.
   * @throws IOException If thrown, transaction failed. Call
   *           {@link #rollback(Server, RegionServerServices)}
   * @return merged region
   * @throws IOException
   * @see #rollback(Server, RegionServerServices)
   * @deprecated use #execute(Server, RegionServerServices, User)
   */
  @Deprecated
  Region execute(Server server, RegionServerServices services) throws IOException;

  /**
   * Run the transaction.
   * @param server Hosting server instance. Can be null when testing
   * @param services Used to online/offline regions.
   * @param user
   * @throws IOException If thrown, transaction failed. Call
   *           {@link #rollback(Server, RegionServerServices)}
   * @return merged region
   * @throws IOException
   * @see #rollback(Server, RegionServerServices, User)
   */
  Region execute(Server server, RegionServerServices services, User user) throws IOException;

  /**
   * Roll back a failed transaction
   * @param server Hosting server instance (May be null when testing).
   * @param services Services of regionserver, used to online regions.
   * @throws IOException If thrown, rollback failed. Take drastic action.
   * @return True if we successfully rolled back, false if we got to the point
   *         of no return and so now need to abort the server to minimize
   *         damage.
   * @deprecated use #rollback(Server, RegionServerServices, User)
   */
  @Deprecated
  boolean rollback(Server server, RegionServerServices services) throws IOException;

  /**
   * Roll back a failed transaction
   * @param server Hosting server instance (May be null when testing).
   * @param services Services of regionserver, used to online regions.
   * @param user
   * @throws IOException If thrown, rollback failed. Take drastic action.
   * @return True if we successfully rolled back, false if we got to the point
   *         of no return and so now need to abort the server to minimize
   *         damage.
   */
  boolean rollback(Server server, RegionServerServices services, User user) throws IOException;

  /**
   * Register a listener for transaction preparation, execution, and possibly
   * rollback phases.
   * <p>A listener can abort a transaction by throwing an exception.
   * @param listener the listener
   * @return 'this' for chaining
   */
  RegionMergeTransaction registerTransactionListener(TransactionListener listener);

  /** @return merged region info */
  HRegionInfo getMergedRegionInfo();

  /**
   * Get the journal for the transaction.
   * <p>Journal entries are an opaque type represented as JournalEntry. They can
   * also provide useful debugging information via their toString method.
   * @return the transaction journal
   */
  List<JournalEntry> getJournal();

  /**
   * Get the Server running the transaction or rollback
   * @return server instance
   */
  Server getServer();

  /**
   * Get the RegonServerServices of the server running the transaction or rollback
   * @return region server services
   */
  RegionServerServices getRegionServerServices();
}

Deleted file: org.apache.hadoop.hbase.util.MetaUtils
@@ -1,155 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */

package org.apache.hadoop.hbase.util;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

/**
 * Contains utility methods for manipulating HBase meta tables.
 * Be sure to call {@link #shutdown()} when done with this class so it closes
 * resources opened during meta processing (ROOT, META, etc.). Be careful
 * how you use this class. If used during migrations, be careful when using
 * this class to check whether migration is needed.
 */
@InterfaceAudience.Private
public class MetaUtils {
  private static final Log LOG = LogFactory.getLog(MetaUtils.class);
  private final Configuration conf;
  private final FSTableDescriptors descriptors;
  private FileSystem fs;
  private WALFactory walFactory;
  private HRegion metaRegion;
  private Map<byte [], HRegion> metaRegions = Collections.synchronizedSortedMap(
    new TreeMap<byte [], HRegion>(Bytes.BYTES_COMPARATOR));

  /** Default constructor
   * @throws IOException e
   */
  public MetaUtils() throws IOException {
    this(HBaseConfiguration.create());
  }

  /**
   * @param conf Configuration
   * @throws IOException e
   */
  public MetaUtils(Configuration conf) throws IOException {
    this.conf = conf;
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
    this.metaRegion = null;
    this.descriptors = new FSTableDescriptors(conf);
    initialize();
  }

  /**
   * Verifies that DFS is available and that HBase is off-line.
   * @throws IOException e
   */
  private void initialize() throws IOException {
    this.fs = FileSystem.get(this.conf);
  }

  /**
   * @return the WAL associated with the given region
   * @throws IOException e
   */
  public synchronized WAL getLog(HRegionInfo info) throws IOException {
    if (this.walFactory == null) {
      String logName =
        HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis();
      final Configuration walConf = new Configuration(this.conf);
      FSUtils.setRootDir(walConf, fs.getHomeDirectory());
      this.walFactory = new WALFactory(walConf, null, logName);
    }
    final byte[] region = info.getEncodedNameAsBytes();
    final byte[] namespace = info.getTable().getNamespace();
    return info.isMetaRegion() ? walFactory.getMetaWAL(region) : walFactory.getWAL(region,
      namespace);
  }

  /**
   * @return HRegion for meta region
   * @throws IOException e
   */
  public synchronized HRegion getMetaRegion() throws IOException {
    return this.metaRegion == null? openMetaRegion(): this.metaRegion;
  }

  /**
   * Closes catalog regions if open. Also closes and deletes the WAL. You
   * must call this method if you want to persist changes made during a
   * MetaUtils edit session.
   */
  public synchronized void shutdown() {
    if (this.metaRegion != null) {
      try {
        this.metaRegion.close();
      } catch (IOException e) {
        LOG.error("closing meta region", e);
      } finally {
        this.metaRegion = null;
      }
    }
    try {
      for (HRegion r: metaRegions.values()) {
        LOG.info("CLOSING hbase:meta " + r.toString());
        r.close();
      }
    } catch (IOException e) {
      LOG.error("closing meta region", e);
    } finally {
      metaRegions.clear();
    }
    try {
      if (this.walFactory != null) {
        this.walFactory.close();
      }
    } catch (IOException e) {
      LOG.error("closing WAL", e);
    }
  }

  private synchronized HRegion openMetaRegion() throws IOException {
    if (this.metaRegion != null) {
      return this.metaRegion;
    }
    this.metaRegion = HRegion.openHRegion(HRegionInfo.FIRST_META_REGIONINFO,
      descriptors.get(TableName.META_TABLE_NAME), getLog(HRegionInfo.FIRST_META_REGIONINFO),
      this.conf);
    this.metaRegion.compactStores();
    return this.metaRegion;
  }
}

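A sketch of the edit-session pattern the removed class's javadoc describes (assumes HBase itself is offline, as the class required; the class and variable names below are illustrative only, not from this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.MetaUtils;

// Sketch: open hbase:meta directly, perform offline edits, then shut down so the WAL is closed.
public class MetaEditSessionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    MetaUtils utils = new MetaUtils(conf);
    try {
      HRegion meta = utils.getMetaRegion();
      System.out.println("Opened " + meta);
      // ... offline edits against the meta region would go here ...
    } finally {
      utils.shutdown(); // closes the meta region and the WAL, persisting the edits
    }
  }
}
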
Deleted file: org.apache.hadoop.hbase.util.SortedCopyOnWriteSet
@@ -1,177 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */

package org.apache.hadoop.hbase.util;

import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Simple {@link java.util.SortedSet} implementation that uses an internal
 * {@link java.util.TreeSet} to provide ordering. All mutation operations
 * create a new copy of the <code>TreeSet</code> instance, so are very
 * expensive. This class is only intended for use on small, very rarely
 * written collections that expect highly concurrent reads. Read operations
 * are performed on a reference to the internal <code>TreeSet</code> at the
 * time of invocation, so will not see any mutations to the collection during
 * their operation.
 *
 * <p>Note that due to the use of a {@link java.util.TreeSet} internally,
 * a {@link java.util.Comparator} instance must be provided, or collection
 * elements must implement {@link java.lang.Comparable}.
 * </p>
 * @param <E> A class implementing {@link java.lang.Comparable} or able to be
 * compared by a provided comparator.
 */
@InterfaceAudience.Private
public class SortedCopyOnWriteSet<E> implements SortedSet<E> {
  private volatile SortedSet<E> internalSet;

  public SortedCopyOnWriteSet() {
    this.internalSet = new TreeSet<>();
  }

  public SortedCopyOnWriteSet(Collection<? extends E> c) {
    this.internalSet = new TreeSet<>(c);
  }

  public SortedCopyOnWriteSet(Comparator<? super E> comparator) {
    this.internalSet = new TreeSet<>(comparator);
  }

  @Override
  public int size() {
    return internalSet.size();
  }

  @Override
  public boolean isEmpty() {
    return internalSet.isEmpty();
  }

  @Override
  public boolean contains(Object o) {
    return internalSet.contains(o);
  }

  @Override
  public Iterator<E> iterator() {
    return internalSet.iterator();
  }

  @Override
  public Object[] toArray() {
    return internalSet.toArray();
  }

  @Override
  public <T> T[] toArray(T[] a) {
    return internalSet.toArray(a);
  }

  @Override
  public synchronized boolean add(E e) {
    SortedSet<E> newSet = new TreeSet<>(internalSet);
    boolean added = newSet.add(e);
    internalSet = newSet;
    return added;
  }

  @Override
  public synchronized boolean remove(Object o) {
    SortedSet<E> newSet = new TreeSet<>(internalSet);
    boolean removed = newSet.remove(o);
    internalSet = newSet;
    return removed;
  }

  @Override
  public boolean containsAll(Collection<?> c) {
    return internalSet.containsAll(c);
  }

  @Override
  public synchronized boolean addAll(Collection<? extends E> c) {
    SortedSet<E> newSet = new TreeSet<>(internalSet);
    boolean changed = newSet.addAll(c);
    internalSet = newSet;
    return changed;
  }

  @Override
  public synchronized boolean retainAll(Collection<?> c) {
    SortedSet<E> newSet = new TreeSet<>(internalSet);
    boolean changed = newSet.retainAll(c);
    internalSet = newSet;
    return changed;
  }

  @Override
  public synchronized boolean removeAll(Collection<?> c) {
    SortedSet<E> newSet = new TreeSet<>(internalSet);
    boolean changed = newSet.removeAll(c);
    internalSet = newSet;
    return changed;
  }

  @Override
  public synchronized void clear() {
    Comparator<? super E> comparator = internalSet.comparator();
    if (comparator != null) {
      internalSet = new TreeSet<>(comparator);
    } else {
      internalSet = new TreeSet<>();
    }
  }

  @Override
  public Comparator<? super E> comparator() {
    return internalSet.comparator();
  }

  @Override
  public SortedSet<E> subSet(E fromElement, E toElement) {
    return internalSet.subSet(fromElement, toElement);
  }

  @Override
  public SortedSet<E> headSet(E toElement) {
    return internalSet.headSet(toElement);
  }

  @Override
  public SortedSet<E> tailSet(E fromElement) {
    return internalSet.tailSet(fromElement);
  }

  @Override
  public E first() {
    return internalSet.first();
  }

  @Override
  public E last() {
    return internalSet.last();
  }
}

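A small sketch of the snapshot semantics described in the removed class's javadoc (the class below is hypothetical, and the commented values are what the implementation above implies rather than captured output):

import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;

// Sketch: iterators operate on the TreeSet snapshot taken at iterator() time,
// so a concurrent add() is never seen by an in-flight loop.
public class SnapshotIterationSketch {
  public static void main(String[] args) {
    SortedCopyOnWriteSet<String> names = new SortedCopyOnWriteSet<>();
    names.add("b");
    names.add("a");
    for (String n : names) {   // iterates the snapshot: "a", then "b"
      names.add("c");          // copy-on-write; invisible to this iterator
      System.out.println(n);
    }
    System.out.println(names.size()); // 3 once the loop has finished
  }
}
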
@@ -65,7 +65,7 @@ public class TestHeapSize {
   private static final Log LOG = LogFactory.getLog(TestHeapSize.class);
   // List of classes implementing HeapSize
   // BatchOperation, BatchUpdate, BlockIndex, Entry, Entry<K,V>, HStoreKey
-  // KeyValue, LruBlockCache, LruHashMap<K,V>, Put, WALKey
+  // KeyValue, LruBlockCache, Put, WALKey

   @BeforeClass
   public static void beforeClass() throws Exception {

@@ -82,7 +82,6 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
-import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;

@@ -396,7 +395,7 @@ public class TestDistributedLogSplitting {
       try {
         ht.increment(incr);
         fail("should have thrown");
-      } catch (OperationConflictException ope) {
+      } catch (IOException ope) {
         LOG.debug("Caught as expected: " + ope.getMessage());
       }
     }

@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.apache.hadoop.util.StringUtils;

@@ -297,7 +296,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
       }
       totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
     } catch (IOException e) {
-      if (ignoreNonceConflicts && (e instanceof OperationConflictException)) {
+      if (ignoreNonceConflicts) {
         LOG.info("Detected nonce conflict, ignoring: " + e.getMessage());
         totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
         return;

Deleted file: org.apache.hadoop.hbase.util.TestSortedCopyOnWriteSet
@@ -1,106 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License, Version 2.0
 * (standard ASF license header).
 */

package org.apache.hadoop.hbase.util;

import static org.junit.Assert.*;

import java.util.Iterator;

import com.google.common.collect.Lists;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({MiscTests.class, SmallTests.class})
public class TestSortedCopyOnWriteSet {

  @Test
  public void testSorting() throws Exception {
    SortedCopyOnWriteSet<String> set = new SortedCopyOnWriteSet<>();
    set.add("c");
    set.add("d");
    set.add("a");
    set.add("b");

    String[] expected = new String[]{"a", "b", "c", "d"};
    String[] stored = set.toArray(new String[4]);
    assertArrayEquals(expected, stored);

    set.add("c");
    assertEquals(4, set.size());
    stored = set.toArray(new String[4]);
    assertArrayEquals(expected, stored);
  }

  @Test
  public void testIteratorIsolation() throws Exception {
    SortedCopyOnWriteSet<String> set = new SortedCopyOnWriteSet<>(Lists.newArrayList("a", "b", "c", "d", "e"));

    // isolation of remove()
    Iterator<String> iter = set.iterator();
    set.remove("c");
    boolean found = false;
    while (iter.hasNext() && !found) {
      found = "c".equals(iter.next());
    }
    assertTrue(found);

    iter = set.iterator();
    found = false;
    while (iter.hasNext() && !found) {
      found = "c".equals(iter.next());
    }
    assertFalse(found);

    // isolation of add()
    iter = set.iterator();
    set.add("f");
    found = false;
    while (iter.hasNext() && !found) {
      String next = iter.next();
      found = "f".equals(next);
    }
    assertFalse(found);

    // isolation of addAll()
    iter = set.iterator();
    set.addAll(Lists.newArrayList("g", "h", "i"));
    found = false;
    while (iter.hasNext() && !found) {
      String next = iter.next();
      found = "g".equals(next) || "h".equals(next) || "i".equals(next);
    }
    assertFalse(found);

    // isolation of clear()
    iter = set.iterator();
    set.clear();
    assertEquals(0, set.size());
    int size = 0;
    while (iter.hasNext()) {
      iter.next();
      size++;
    }
    assertTrue(size > 0);
  }

}