Signed-off-by: Wellington Chevreuil <wchevreuil@apache.org>
parent c94ec54185
commit 82f966474e

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.exceptions;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.SyncFailedException;

@@ -140,6 +141,10 @@ public final class ClientExceptionsUtil {
    * For test only. Usually you should use the {@link #isConnectionException(Throwable)} method
    * below.
    */
+  @RestrictedApi(explanation = "Should only be called in tests", link = "",
+      allowedOnPath = ".*/src/test/.*")
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "test only")
   public static Set<Class<? extends Throwable>> getConnectionExceptionTypes() {
     return CONNECTION_EXCEPTION_TYPES;
   }

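The two hunks above set the pattern for the rest of the commit: SpotBugs raises MS_EXPOSE_REP when a static accessor returns a reference to internal mutable state, and the fix is either to suppress with a recorded justification (as done here for a test-only accessor, fenced off further by @RestrictedApi) or to stop exposing the mutable object at all. A minimal sketch of both options, with illustrative class and field names (not from the patch):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public final class ExceptionRegistry {
  private static final Set<Class<? extends Throwable>> TYPES = new HashSet<>();

  // Option 1: expose the internal set but record why that is acceptable.
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
      justification = "test only")
  static Set<Class<? extends Throwable>> getTypesForTest() {
    return TYPES;
  }

  // Option 2: hand out a read-only view, so the detector never fires.
  public static Set<Class<? extends Throwable>> getTypes() {
    return Collections.unmodifiableSet(TYPES);
  }
}
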
@@ -1473,18 +1473,12 @@ public final class ProtobufUtil {
     }
   }
 
-  /**
-   * @see #buildGetServerInfoRequest()
-   */
-  private static GetServerInfoRequest GET_SERVER_INFO_REQUEST =
-    GetServerInfoRequest.newBuilder().build();
-
   /**
    * Create a new GetServerInfoRequest
    * @return a GetServerInfoRequest
    */
   public static GetServerInfoRequest buildGetServerInfoRequest() {
-    return GET_SERVER_INFO_REQUEST;
+    return GetServerInfoRequest.getDefaultInstance();
   }
 
   public static ScanMetrics toScanMetrics(final byte[] bytes) {

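This and the RequestConverter hunks that follow all make the same substitution, and it is safe because of a protobuf-java guarantee: generated messages are immutable, and each generated class keeps one shared default instance, so caching newBuilder().build() in a static field only duplicates what the runtime already provides (while looking like exposed mutable state to SpotBugs). A hedged sketch of the equivalence:

// Both are empty GetServerInfoRequest messages with no fields set.
GetServerInfoRequest cached = GetServerInfoRequest.newBuilder().build();
GetServerInfoRequest shared = GetServerInfoRequest.getDefaultInstance();
assert cached.equals(shared);                               // same contents
assert shared == GetServerInfoRequest.getDefaultInstance(); // one shared object
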
@@ -1032,32 +1032,20 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  /**
-   * @see #buildRollWALWriterRequest()
-   */
-  private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST =
-    RollWALWriterRequest.newBuilder().build();
-
   /**
    * Create a new RollWALWriterRequest
    * @return a ReplicateWALEntryRequest
    */
   public static RollWALWriterRequest buildRollWALWriterRequest() {
-    return ROLL_WAL_WRITER_REQUEST;
+    return RollWALWriterRequest.getDefaultInstance();
   }
 
-  /**
-   * @see #buildGetServerInfoRequest()
-   */
-  private static GetServerInfoRequest GET_SERVER_INFO_REQUEST =
-    GetServerInfoRequest.newBuilder().build();
-
   /**
    * Create a new GetServerInfoRequest
    * @return a GetServerInfoRequest
    */
   public static GetServerInfoRequest buildGetServerInfoRequest() {
-    return GET_SERVER_INFO_REQUEST;
+    return GetServerInfoRequest.getDefaultInstance();
   }
 
   /**

@@ -1438,18 +1426,12 @@ public final class RequestConverter {
       .addAllOptions(ClusterMetricsBuilder.toOptions(options)).build();
   }
 
-  /**
-   * @see #buildCatalogScanRequest
-   */
-  private static final RunCatalogScanRequest CATALOG_SCAN_REQUEST =
-    RunCatalogScanRequest.newBuilder().build();
-
   /**
    * Creates a request for running a catalog scan
    * @return A {@link RunCatalogScanRequest}
    */
   public static RunCatalogScanRequest buildCatalogScanRequest() {
-    return CATALOG_SCAN_REQUEST;
+    return RunCatalogScanRequest.getDefaultInstance();
   }
 
   /**

@@ -1460,32 +1442,20 @@ public final class RequestConverter {
     return EnableCatalogJanitorRequest.newBuilder().setEnable(enable).build();
   }
 
-  /**
-   * @see #buildIsCatalogJanitorEnabledRequest()
-   */
-  private static final IsCatalogJanitorEnabledRequest IS_CATALOG_JANITOR_ENABLED_REQUEST =
-    IsCatalogJanitorEnabledRequest.newBuilder().build();
-
   /**
    * Creates a request for querying the master whether the catalog janitor is enabled
    * @return A {@link IsCatalogJanitorEnabledRequest}
    */
   public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() {
-    return IS_CATALOG_JANITOR_ENABLED_REQUEST;
+    return IsCatalogJanitorEnabledRequest.getDefaultInstance();
   }
 
-  /**
-   * @see #buildRunCleanerChoreRequest()
-   */
-  private static final RunCleanerChoreRequest CLEANER_CHORE_REQUEST =
-    RunCleanerChoreRequest.newBuilder().build();
-
   /**
    * Creates a request for running cleaner chore
    * @return A {@link RunCleanerChoreRequest}
    */
   public static RunCleanerChoreRequest buildRunCleanerChoreRequest() {
-    return CLEANER_CHORE_REQUEST;
+    return RunCleanerChoreRequest.getDefaultInstance();
   }
 
   /**

@@ -1496,18 +1466,12 @@ public final class RequestConverter {
     return SetCleanerChoreRunningRequest.newBuilder().setOn(on).build();
   }
 
-  /**
-   * @see #buildIsCleanerChoreEnabledRequest()
-   */
-  private static final IsCleanerChoreEnabledRequest IS_CLEANER_CHORE_ENABLED_REQUEST =
-    IsCleanerChoreEnabledRequest.newBuilder().build();
-
   /**
    * Creates a request for querying the master whether the cleaner chore is enabled
    * @return A {@link IsCleanerChoreEnabledRequest}
    */
   public static IsCleanerChoreEnabledRequest buildIsCleanerChoreEnabledRequest() {
-    return IS_CLEANER_CHORE_ENABLED_REQUEST;
+    return IsCleanerChoreEnabledRequest.getDefaultInstance();
   }
 
   /**

@@ -1727,34 +1691,25 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  private static final GetSpaceQuotaRegionSizesRequest GET_SPACE_QUOTA_REGION_SIZES_REQUEST =
-    GetSpaceQuotaRegionSizesRequest.newBuilder().build();
-
   /**
    * Returns a {@link GetSpaceQuotaRegionSizesRequest} object.
    */
   public static GetSpaceQuotaRegionSizesRequest buildGetSpaceQuotaRegionSizesRequest() {
-    return GET_SPACE_QUOTA_REGION_SIZES_REQUEST;
+    return GetSpaceQuotaRegionSizesRequest.getDefaultInstance();
   }
 
-  private static final GetSpaceQuotaSnapshotsRequest GET_SPACE_QUOTA_SNAPSHOTS_REQUEST =
-    GetSpaceQuotaSnapshotsRequest.newBuilder().build();
-
   /**
    * Returns a {@link GetSpaceQuotaSnapshotsRequest} object.
    */
   public static GetSpaceQuotaSnapshotsRequest buildGetSpaceQuotaSnapshotsRequest() {
-    return GET_SPACE_QUOTA_SNAPSHOTS_REQUEST;
+    return GetSpaceQuotaSnapshotsRequest.getDefaultInstance();
   }
 
-  private static final GetQuotaStatesRequest GET_QUOTA_STATES_REQUEST =
-    GetQuotaStatesRequest.newBuilder().build();
-
   /**
    * Returns a {@link GetQuotaStatesRequest} object.
    */
   public static GetQuotaStatesRequest buildGetQuotaStatesRequest() {
-    return GET_QUOTA_STATES_REQUEST;
+    return GetQuotaStatesRequest.getDefaultInstance();
   }
 
   public static DecommissionRegionServersRequest

@@ -30,6 +30,8 @@ public final class CryptoCipherProvider implements CipherProvider {
 
   private static CryptoCipherProvider instance;
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static CryptoCipherProvider getInstance() {
     if (instance != null) {
       return instance;

@@ -30,6 +30,8 @@ public final class DefaultCipherProvider implements CipherProvider {
 
   private static DefaultCipherProvider instance;
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static DefaultCipherProvider getInstance() {
     if (instance != null) {
       return instance;

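Both cipher providers are lazily initialized singletons, and returning the static instance is exactly what MS_EXPOSE_REP flags, so the commit documents the intent rather than restructuring. A minimal sketch of the annotated shape (names illustrative, synchronization simplified):

public final class ExampleProvider {
  private static ExampleProvider instance;

  private ExampleProvider() {
  }

  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
      justification = "singleton pattern")
  public static synchronized ExampleProvider getInstance() {
    if (instance == null) {
      instance = new ExampleProvider();
    }
    return instance;
  }
}
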
@@ -19,14 +19,14 @@ package org.apache.hadoop.hbase.security;
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
 
 /**
  * Keeps lists of superusers and super groups loaded from HBase configuration, checks if certain
  * user is regarded as superuser.

@@ -38,8 +38,8 @@ public final class Superusers {
   /** Configuration key for superusers */
   public static final String SUPERUSER_CONF_KEY = "hbase.superuser"; // Not getting a name
 
-  private static Set<String> superUsers;
-  private static Set<String> superGroups;
+  private static ImmutableSet<String> superUsers;
+  private static ImmutableSet<String> superGroups;
   private static User systemUser;
 
   private Superusers() {

@@ -53,8 +53,8 @@ public final class Superusers {
    * @throws IllegalStateException if current user is null
    */
   public static void initialize(Configuration conf) throws IOException {
-    superUsers = new HashSet<>();
-    superGroups = new HashSet<>();
+    ImmutableSet.Builder<String> superUsersBuilder = ImmutableSet.builder();
+    ImmutableSet.Builder<String> superGroupsBuilder = ImmutableSet.builder();
     systemUser = User.getCurrent();
 
     if (systemUser == null) {

@@ -64,17 +64,19 @@ public final class Superusers {
 
     String currentUser = systemUser.getShortName();
     LOG.trace("Current user name is {}", currentUser);
-    superUsers.add(currentUser);
+    superUsersBuilder.add(currentUser);
 
     String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
     for (String name : superUserList) {
       if (AuthUtil.isGroupPrincipal(name)) {
         // Let's keep the '@' for distinguishing from user.
-        superGroups.add(name);
+        superGroupsBuilder.add(name);
       } else {
-        superUsers.add(name);
+        superUsersBuilder.add(name);
       }
     }
+    superUsers = superUsersBuilder.build();
+    superGroups = superGroupsBuilder.build();
   }
 
   /**

@@ -111,14 +113,20 @@ public final class Superusers {
     return superUsers.contains(user) || superGroups.contains(user);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "immutable")
   public static Collection<String> getSuperUsers() {
     return superUsers;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "immutable")
   public static Collection<String> getSuperGroups() {
     return superGroups;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "by design")
   public static User getSystemUser() {
     return systemUser;
   }

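The switch from HashSet to ImmutableSet is what makes the MS_EXPOSE_REP suppressions on the getters honest: after build(), the set handed to callers cannot be mutated. A small sketch of the builder flow, using plain Guava (HBase uses its shaded thirdparty copy; the values are illustrative):

import com.google.common.collect.ImmutableSet;

public class ImmutableSetDemo {
  public static void main(String[] args) {
    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
    builder.add("current-user"); // the process user is always a superuser
    for (String name : new String[] { "alice", "@admins" }) { // illustrative
      builder.add(name);
    }
    ImmutableSet<String> superUsers = builder.build();
    System.out.println(superUsers.contains("alice")); // true
    // superUsers.add("mallory") would throw UnsupportedOperationException.
  }
}
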
@@ -59,6 +59,8 @@ public class SpanReceiverHost {
 
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "by design")
   public static Configuration getConfiguration() {
     synchronized (SingletonHolder.INSTANCE.lock) {
       if (SingletonHolder.INSTANCE.host == null || SingletonHolder.INSTANCE.host.conf == null) {

@@ -76,6 +76,12 @@
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.github.stephenc.findbugs</groupId>
+      <artifactId>findbugs-annotations</artifactId>
+      <scope>compile</scope>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>

@@ -138,6 +138,8 @@ public class FastLongHistogram {
   /**
    * Computes the quantiles give the ratios.
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "FL_FLOATS_AS_LOOP_COUNTERS",
+      justification = "valid usage")
   public long[] getQuantiles(double[] quantiles) {
     if (!hasData) {
       // No data yet.

@@ -266,10 +268,6 @@ public class FastLongHistogram {
     this.bins = new Bins(bins, numOfBins, 0.01, 0.999);
   }
 
-  private FastLongHistogram(Bins bins) {
-    this.bins = bins;
-  }
-
   /**
    * Adds a value to the histogram.
    */

@@ -42,6 +42,12 @@
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.github.stephenc.findbugs</groupId>
+      <artifactId>findbugs-annotations</artifactId>
+      <scope>compile</scope>
+      <optional>true</optional>
+    </dependency>
   </dependencies>
   <build>
     <plugins>

@@ -65,6 +65,8 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
    * of a {@code LiteralByteString}.
    * @return byte[] representation
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "by design")
   public static byte[] zeroCopyGetBytes(final ByteString buf) {
     if (buf instanceof LiteralByteString) {
       return ((LiteralByteString) buf).bytes;

@@ -53,6 +53,8 @@ public class RESTServlet implements Constants {
   }
 
   /** Returns the RESTServlet singleton instance */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public synchronized static RESTServlet getInstance() {
     assert (INSTANCE != null);
     return INSTANCE;

@@ -66,8 +68,10 @@ public class RESTServlet implements Constants {
   /**
    * @param conf Existing configuration to use in rest servlet
    * @param userProvider the login user provider
-   * @return the RESTServlet singleton instance n
+   * @return the RESTServlet singleton instance
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public synchronized static RESTServlet getInstance(Configuration conf, UserProvider userProvider)
     throws IOException {
     if (INSTANCE == null) {

@@ -113,12 +113,10 @@ public class FileLink {
         res = in.read();
       } catch (FileNotFoundException e) {
         res = tryOpen().read();
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        res = tryOpen().read();
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        res = tryOpen().read();
       }
-      if (res > 0) pos += 1;
+      if (res > 0) {
+        pos += 1;
+      }
       return res;
     }
 

@@ -134,12 +132,10 @@ public class FileLink {
         n = in.read(b, off, len);
       } catch (FileNotFoundException e) {
         n = tryOpen().read(b, off, len);
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        n = tryOpen().read(b, off, len);
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        n = tryOpen().read(b, off, len);
       }
-      if (n > 0) pos += n;
+      if (n > 0) {
+        pos += n;
+      }
       assert (in.getPos() == pos);
       return n;
     }

@@ -151,10 +147,6 @@ public class FileLink {
         n = in.read(position, buffer, offset, length);
       } catch (FileNotFoundException e) {
         n = tryOpen().read(position, buffer, offset, length);
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        n = tryOpen().read(position, buffer, offset, length);
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        n = tryOpen().read(position, buffer, offset, length);
       }
       return n;
     }

@@ -170,10 +162,6 @@ public class FileLink {
         in.readFully(position, buffer, offset, length);
       } catch (FileNotFoundException e) {
         tryOpen().readFully(position, buffer, offset, length);
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        tryOpen().readFully(position, buffer, offset, length);
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        tryOpen().readFully(position, buffer, offset, length);
       }
     }
 

@@ -185,13 +173,11 @@ public class FileLink {
         skipped = in.skip(n);
       } catch (FileNotFoundException e) {
         skipped = tryOpen().skip(n);
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        skipped = tryOpen().skip(n);
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        skipped = tryOpen().skip(n);
       }
 
-      if (skipped > 0) pos += skipped;
+      if (skipped > 0) {
+        pos += skipped;
+      }
       return skipped;
     }
 

@@ -201,10 +187,6 @@ public class FileLink {
         return in.available();
       } catch (FileNotFoundException e) {
         return tryOpen().available();
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        return tryOpen().available();
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        return tryOpen().available();
       }
     }
 

@@ -214,10 +196,6 @@ public class FileLink {
         in.seek(pos);
       } catch (FileNotFoundException e) {
         tryOpen().seek(pos);
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        tryOpen().seek(pos);
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        tryOpen().seek(pos);
       }
       this.pos = pos;
     }

@@ -234,10 +212,6 @@ public class FileLink {
         res = in.seekToNewSource(targetPos);
       } catch (FileNotFoundException e) {
         res = tryOpen().seekToNewSource(targetPos);
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        res = tryOpen().seekToNewSource(targetPos);
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        res = tryOpen().seekToNewSource(targetPos);
       }
       if (res) pos = targetPos;
       return res;

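Every FileLink hunk above deletes the same two catch blocks. Per the inline comments, the NullPointerException and AssertionError escapes came from DFSInputStream.getBlockAt() in HDFS 1.x, which current HBase no longer runs against, so catching them now only masks real bugs (SpotBugs reports such catches as DCN_NULLPOINTER_EXCEPTION). What survives is the one legitimate retry, sketched here with the diff's own names:

@Override
public int read() throws IOException {
  int res;
  try {
    res = in.read();
  } catch (FileNotFoundException e) {
    // The link target moved (e.g. the file was archived);
    // re-resolve the link and retry on the freshly opened stream.
    res = tryOpen().read();
  }
  if (res > 0) {
    pos += 1;
  }
  return res;
}
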
@@ -388,7 +388,7 @@ public class FixedFileTrailer {
       bufferSize = (int) fileSize;
     }
 
-    HFileUtil.seekOnMultipleSources(istream, seekPoint);
+    istream.seek(seekPoint);
 
     ByteBuffer buf = ByteBuffer.allocate(bufferSize);
     istream.readFully(buf.array(), buf.arrayOffset(), buf.arrayOffset() + buf.limit());

@@ -1427,7 +1427,7 @@ public class HFileBlock implements Cacheable {
     boolean peekIntoNextBlock, long fileOffset, boolean pread) throws IOException {
     if (!pread) {
       // Seek + read. Better for scanning.
-      HFileUtil.seekOnMultipleSources(istream, fileOffset);
+      istream.seek(fileOffset);
       long realOffset = istream.getPos();
       if (realOffset != fileOffset) {
         throw new IOException("Tried to seek to " + fileOffset + " to read " + size

@@ -73,10 +73,6 @@ public class HFilePreadReader extends HFileReaderImpl {
           if (LOG.isTraceEnabled()) {
             LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
           }
-        } catch (NullPointerException e) {
-          LOG.warn(
-            "Stream moved/closed or prefetch cancelled?" + getPathOffsetEndStr(path, offset, end),
-            e);
         } catch (Exception e) {
           // Other exceptions are interesting
           LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e);

@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.io.IOException;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.yetus.audience.InterfaceAudience;
-
-@InterfaceAudience.Private
-class HFileUtil {
-
-  /**
-   * guards against NullPointer utility which tries to seek on the DFSIS and will try an alternative
-   * source if the FSDataInputStream throws an NPE HBASE-17501 nnn
-   */
-  static public void seekOnMultipleSources(FSDataInputStream istream, long offset)
-    throws IOException {
-    try {
-      // attempt to seek inside of current blockReader
-      istream.seek(offset);
-    } catch (NullPointerException e) {
-      // retry the seek on an alternate copy of the data
-      // this can occur if the blockReader on the DFSInputStream is null
-      istream.seekToNewSource(offset);
-    }
-  }
-}

@@ -43,7 +43,7 @@ public class NamedQueueRecorder {
   private final Disruptor<RingBufferEnvelope> disruptor;
   private final LogEventHandler logEventHandler;
 
-  private static NamedQueueRecorder namedQueueRecorder;
+  private static volatile NamedQueueRecorder namedQueueRecorder;
   private static boolean isInit = false;
   private static final Object LOCK = new Object();
 

@@ -71,6 +71,8 @@ public class NamedQueueRecorder {
     this.disruptor.start();
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static NamedQueueRecorder getInstance(Configuration conf) {
     if (namedQueueRecorder != null) {
       return namedQueueRecorder;

@@ -32,6 +32,8 @@ public final class NoOpRegionSizeStore implements RegionSizeStore {
   private NoOpRegionSizeStore() {
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static NoOpRegionSizeStore getInstance() {
     return INSTANCE;
   }

@@ -39,6 +39,8 @@ public final class MissingSnapshotViolationPolicyEnforcement
   private MissingSnapshotViolationPolicyEnforcement() {
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static SpaceViolationPolicyEnforcement getInstance() {
     return SINGLETON;
   }

@@ -114,7 +114,8 @@ public class ChunkCreator {
    * @param heapMemoryManager the heapmemory manager
    * @return singleton MSLABChunkCreator
    */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "LI_LAZY_INIT_STATIC",
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+      value = { "LI_LAZY_INIT_STATIC", "MS_EXPOSE_REP" },
     justification = "Method is called by single thread at the starting of RS")
   public static ChunkCreator initialize(int chunkSize, boolean offheap, long globalMemStoreSize,
     float poolSizePercentage, float initialCountPercentage, HeapMemoryManager heapMemoryManager,

@@ -127,6 +128,8 @@ public class ChunkCreator {
     return instance;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static ChunkCreator getInstance() {
     return instance;
   }

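The ChunkCreator change also shows the annotation's array form: value accepts several detector names, so one annotation can cover both the unsynchronized lazy initialization (LI_LAZY_INIT_STATIC) and the exposed static (MS_EXPOSE_REP). A self-contained sketch with an illustrative class:

public class LazyHolder {
  private static LazyHolder instance;

  // value takes an array, so one annotation silences both detectors
  // that fire on this method.
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value = { "LI_LAZY_INIT_STATIC", "MS_EXPOSE_REP" },
      justification = "only called from a single startup thread")
  public static LazyHolder initialize() {
    if (instance == null) {
      instance = new LazyHolder();
    }
    return instance;
  }
}
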
@@ -368,7 +368,7 @@ public class HMobStore extends HStore {
   private MobCell readCell(List<Path> locations, String fileName, Cell search,
     boolean cacheMobBlocks, long readPt, boolean readEmptyValueOnMobCellMiss) throws IOException {
     FileSystem fs = getFileSystem();
-    Throwable throwable = null;
+    IOException ioe = null;
     for (Path location : locations) {
       MobFile file = null;
       Path path = new Path(location, fileName);

@@ -379,7 +379,7 @@ public class HMobStore extends HStore {
           : file.readCell(search, cacheMobBlocks);
       } catch (IOException e) {
         mobFileCache.evictFile(fileName);
-        throwable = e;
+        ioe = e;
         if (
           (e instanceof FileNotFoundException) || (e.getCause() instanceof FileNotFoundException)
         ) {

@@ -390,14 +390,6 @@ public class HMobStore extends HStore {
         } else {
           throw e;
         }
-      } catch (NullPointerException e) { // HDFS 1.x - DFSInputStream.getBlockAt()
-        mobFileCache.evictFile(fileName);
-        LOG.debug("Fail to read the cell", e);
-        throwable = e;
-      } catch (AssertionError e) { // assert in HDFS 1.x - DFSInputStream.getBlockAt()
-        mobFileCache.evictFile(fileName);
-        LOG.debug("Fail to read the cell", e);
-        throwable = e;
       } finally {
         if (file != null) {
           mobFileCache.closeFile(file);

@@ -409,18 +401,15 @@ public class HMobStore extends HStore {
     if (readEmptyValueOnMobCellMiss) {
       return null;
     } else if (
-      (throwable instanceof FileNotFoundException)
-        || (throwable.getCause() instanceof FileNotFoundException)
+      (ioe instanceof FileNotFoundException) || (ioe.getCause() instanceof FileNotFoundException)
     ) {
       // The region is re-opened when FileNotFoundException is thrown.
       // This is not necessary when MOB files cannot be found, because the store files
       // in a region only contain the references to MOB files and a re-open on a region
       // doesn't help fix the lost MOB files.
-      throw new DoNotRetryIOException(throwable);
-    } else if (throwable instanceof IOException) {
-      throw (IOException) throwable;
+      throw new DoNotRetryIOException(ioe);
     } else {
-      throw new IOException(throwable);
+      throw ioe;
     }
   }
 

@@ -45,9 +45,11 @@ public class NoLimitScannerContext extends ScannerContext {
   private static final ScannerContext NO_LIMIT = new NoLimitScannerContext();
 
   /**
-   * @return The static, immutable instance of {@link NoLimitScannerContext} to be used whenever
-   *         limits should not be enforced
+   * Returns the static, immutable instance of {@link NoLimitScannerContext} to be used whenever
+   * limits should not be enforced
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static final ScannerContext getInstance() {
     return NO_LIMIT;
   }

@@ -421,6 +421,8 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     return totalSize;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "FL_FLOATS_AS_LOOP_COUNTERS",
+      justification = "valid usage")
   private Pair<Long, Integer> estimateTargetKvs(Collection<HStoreFile> files, double splitCount) {
     // If the size is larger than what we target, we don't want to split into proportionally
     // larger parts and then have to split again very soon. So, we will increase the multiplier

@@ -433,7 +435,10 @@ public class StripeCompactionPolicy extends CompactionPolicy {
     while (ratio > 1.0) {
       // Ratio of real to desired size if we increase the multiplier.
       double newRatio = totalSize / ((splitCount + 1.0) * targetPartSize);
-      if ((1.0 / newRatio) >= ratio) break; // New ratio is < 1.0, but further than the last one.
+      if ((1.0 / newRatio) >= ratio) {
+        // New ratio is < 1.0, but further than the last one.
+        break;
+      }
       ratio = newRatio;
       splitCount += 1.0;
     }

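FL_FLOATS_AS_LOOP_COUNTERS exists because floating-point counters accumulate rounding error and, for float, can stop advancing entirely; the loops flagged in this commit are coarse and bounded, so the authors suppress instead of rewriting. A small demonstration of the failure modes the detector guards against:

public class FloatCounterDemo {
  public static void main(String[] args) {
    // double rounding: ten 0.1 steps do not sum to exactly 1.0 ...
    double d = 0.0;
    for (int i = 0; i < 10; i++) {
      d += 0.1;
    }
    System.out.println(d == 1.0); // false: d is 0.9999999999999999
    // ... and float counters can stop advancing entirely past 2^24:
    float f = 16_777_216f; // 2^24, the limit of float's 24-bit mantissa
    System.out.println(f + 1f == f); // true: a "f += 1f" loop stalls here
  }
}
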
@@ -63,15 +63,10 @@ public class ProtobufLogWriter extends AbstractProtobufLogWriter implements FSHL
   @Override
   public void close() throws IOException {
     if (this.output != null) {
-      try {
-        if (!trailerWritten) {
-          writeWALTrailer();
-        }
-        this.output.close();
-      } catch (NullPointerException npe) {
-        // Can get a NPE coming up from down in DFSClient$DFSOutputStream#close
-        LOG.warn(npe.toString(), npe);
-      }
+      if (!trailerWritten) {
+        writeWALTrailer();
+      }
+      this.output.close();
       this.output = null;
     }
   }

@@ -320,6 +320,8 @@ class WALEntryStream implements Closeable {
     }
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DCN_NULLPOINTER_EXCEPTION",
+      justification = "HDFS-4380")
   private void openReader(Path path) throws IOException {
     try {
       // Detect if this is a new file, if so get a new reader else

@@ -370,6 +372,8 @@ class WALEntryStream implements Closeable {
     }
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DCN_NULLPOINTER_EXCEPTION",
+      justification = "HDFS-4380")
   private void resetReader() throws IOException {
     try {
       currentEntry = null;

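Not every NullPointerException handler is removed by this commit: WALEntryStream still has to defend against HDFS-4380, where a DFSInputStream closed underneath the reader surfaces as an NPE. There the catch stays and the new annotation records why, which is the sanctioned way to quiet DCN_NULLPOINTER_EXCEPTION. A hedged sketch of that shape (the body is illustrative, not the real reader logic):

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DCN_NULLPOINTER_EXCEPTION",
    justification = "HDFS-4380")
private void openReader(Path path) throws IOException {
  try {
    reader = createReader(path); // createReader is an illustrative helper
  } catch (NullPointerException e) {
    // HDFS-4380: the stream was closed out from under us; convert to an
    // IOException so the caller's normal retry path handles it.
    throw new IOException("Failed to open WAL reader for " + path, e);
  }
}
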
@@ -98,12 +98,7 @@ public class FsDelegationToken {
       userToken = userProvider.getCurrent().getToken(tokenKind, fs.getCanonicalServiceName());
       if (userToken == null) {
         hasForwardedToken = false;
-        try {
-          userToken = fs.getDelegationToken(renewer);
-        } catch (NullPointerException npe) {
-          // we need to handle NullPointerException in case HADOOP-10009 is missing
-          LOG.error("Failed to get token for " + renewer);
-        }
+        userToken = fs.getDelegationToken(renewer);
       } else {
         hasForwardedToken = true;
         LOG.info("Use the existing token: " + userToken);

@@ -74,9 +74,11 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider {
   }
 
   /**
-   * Creates the singleton instance, if not yet present, and returns the same. nn * @return
-   * Singleton instance of VisibilityLabelsCache n
+   * Creates the singleton instance, if not yet present, and returns the same.
+   * @return Singleton instance of VisibilityLabelsCache
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public synchronized static VisibilityLabelsCache createAndGet(ZKWatcher watcher,
     Configuration conf) throws IOException {
     // VisibilityLabelService#init() for different regions (in same RS) passes same instance of

@@ -95,6 +97,8 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider {
    * @return Singleton instance of VisibilityLabelsCache n * when this is called before calling
    *         {@link #createAndGet(ZKWatcher, Configuration)}
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "MS_EXPOSE_REP",
+      justification = "singleton pattern")
   public static VisibilityLabelsCache get() {
     // By the time this method is called, the singleton instance of VisibilityLabelsCache should
     // have been created.

@@ -76,6 +76,8 @@ public final class BloomFilterUtil {
    * This gets used in {@link #contains(ByteBuff, int, int, Hash, int, HashKey)}
    * @param random The random number source to use, or null to compute actual hashes
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EI_EXPOSE_STATIC_REP2",
+      justification = "ignore for now, improve TestCompoundBloomFilter later")
   public static void setRandomGeneratorForTest(Random random) {
     randomGeneratorForTest = random;
   }

@@ -186,6 +186,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * {@link #fsvisited} is not {@code true}, i.e, we haven't done a full scan yet, to see if a newer
    * file has been created since the cached one was read.
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DCN_NULLPOINTER_EXCEPTION",
+      justification = "Fixed in newer minor releases, not a blocker, just keep it as is for now")
   @Override
   @Nullable
   public TableDescriptor get(TableName tableName) {

@@ -985,12 +985,9 @@ public class HBaseFsck extends Configured implements Closeable {
         start = CellUtil.cloneRow(startKv.get());
         Optional<Cell> endKv = hf.getLastKey();
         end = CellUtil.cloneRow(endKv.get());
-      } catch (IOException ioe) {
+      } catch (Exception ioe) {
         LOG.warn("Problem reading orphan file " + hfile + ", skipping");
         continue;
-      } catch (NullPointerException ioe) {
-        LOG.warn("Orphan file " + hfile + " is possibly corrupted HFile, skipping");
-        continue;
       } finally {
         if (hf != null) {
           hf.close();

@@ -483,8 +483,9 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
    * @param conf configuration
    * @return WAL Reader instance
    */
-  public static org.apache.hadoop.hbase.wal.WAL.Reader openReader(Path path, Configuration conf)
-    throws IOException {
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DCN_NULLPOINTER_EXCEPTION",
+      justification = "HDFS-4380")
+  public static WAL.Reader openReader(Path path, Configuration conf) throws IOException {
     long retryInterval = 2000; // 2 sec
     int maxAttempts = 30;
     int attempt = 0;

@@ -214,8 +214,6 @@ public class MiniHBaseCluster extends HBaseCluster {
       try {
         LOG.info("Hook closing fs=" + this.fs);
         this.fs.close();
-      } catch (NullPointerException npe) {
-        LOG.debug("Need to fix these: " + npe.toString());
       } catch (IOException e) {
         LOG.warn("Running hook", e);
       }