HADOOP-17138. Fix spotbugs warnings surfaced after upgrade to 4.0.6. (#2155)
parent d23cc9d85d
commit 1b29c9bfee
@@ -16,8 +16,8 @@
   -->
 <FindBugsFilter>
   <Match>
-    <Class name="org.apache.hadoop.fs.cosn.CosNInputStream.ReadBuffer"/>
+    <Class name="org.apache.hadoop.fs.cosn.CosNInputStream$ReadBuffer"/>
     <Method name="getBuffer"/>
     <Bug pattern="EI_EXPOSE_REP"/>
   </Match>
 </FindBugsFilter>
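
For context on the rename above: SpotBugs matches classes by their binary names, in which a nested class is separated from its enclosing class by '$' rather than '.', so the filter entry only applies once it names CosNInputStream$ReadBuffer. A minimal, illustrative sketch of that naming rule (Outer/Inner are made-up names, not Hadoop code):

    // Binary names of nested classes use '$'; SpotBugs <Class name="..."/>
    // filter entries match against this form.
    public class Outer {
      static class Inner { }

      public static void main(String[] args) {
        System.out.println(Inner.class.getName());           // prints "Outer$Inner"
        System.out.println(Inner.class.getCanonicalName());  // prints "Outer.Inner"
      }
    }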
@@ -3714,7 +3714,7 @@ public abstract class Server {
       if (count == null) {
         count = 1;
       } else {
-        count++;
+        count = count + 1;
       }
       userToConnectionsMap.put(user, count);
     }
@@ -3726,7 +3726,7 @@ public abstract class Server {
       if (count == null) {
         return;
       } else {
-        count--;
+        count = count - 1;
       }
       if (count == 0) {
         userToConnectionsMap.remove(user);
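
The two Server hunks above replace ++/-- on a boxed Integer with the spelled-out arithmetic, and the FSEditLogLoader and SLSRunner hunks further down make the same change; the increment on a boxed value silently unboxes and re-boxes, which appears to be what the upgraded SpotBugs flags. A rough, self-contained sketch of the pattern (the class, map, and key names here are invented):

    import java.util.HashMap;
    import java.util.Map;

    public class ConnectionCounter {
      // Per-user connection counts, mirroring the shape of userToConnectionsMap.
      private final Map<String, Integer> counts = new HashMap<>();

      void increment(String user) {
        Integer count = counts.get(user);
        if (count == null) {
          count = 1;
        } else {
          count = count + 1; // explicit unbox/add/re-box instead of count++
        }
        counts.put(user, count);
      }

      public static void main(String[] args) {
        ConnectionCounter counter = new ConnectionCounter();
        counter.increment("alice");
        counter.increment("alice");
        System.out.println(counter.counts); // prints {alice=2}
      }
    }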
@@ -354,23 +354,29 @@ public class DatasetVolumeChecker {
     }
 
     @Override
-    public void onSuccess(@Nonnull VolumeCheckResult result) {
-      switch(result) {
-      case HEALTHY:
-      case DEGRADED:
-        LOG.debug("Volume {} is {}.", reference.getVolume(), result);
-        markHealthy();
-        break;
-      case FAILED:
-        LOG.warn("Volume {} detected as being unhealthy",
-            reference.getVolume());
-        markFailed();
-        break;
-      default:
-        LOG.error("Unexpected health check result {} for volume {}",
-            result, reference.getVolume());
-        markHealthy();
-        break;
+    public void onSuccess(VolumeCheckResult result) {
+      if (result == null) {
+        LOG.error("Unexpected health check result null for volume {}",
+            reference.getVolume());
+        markFailed();
+      } else {
+        switch(result) {
+        case HEALTHY:
+        case DEGRADED:
+          LOG.debug("Volume {} is {}.", reference.getVolume(), result);
+          markHealthy();
+          break;
+        case FAILED:
+          LOG.warn("Volume {} detected as being unhealthy",
+              reference.getVolume());
+          markFailed();
+          break;
+        default:
+          LOG.error("Unexpected health check result {} for volume {}",
+              result, reference.getVolume());
+          markHealthy();
+          break;
+        }
       }
       cleanup();
     }
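
The hunk above (and the ThrottledAsyncChecker hunk that follows) adjusts onSuccess overrides of Guava's FutureCallback, whose parameter is declared @Nullable: DatasetVolumeChecker drops @Nonnull and handles a null result explicitly instead. A standalone sketch of that shape, assuming a recent Guava on the classpath (the future value and messages are placeholders, not Hadoop code):

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.MoreExecutors;

    public class CallbackSketch {
      public static void main(String[] args) {
        ListenableFuture<String> lf = Futures.immediateFuture("HEALTHY");
        Futures.addCallback(lf, new FutureCallback<String>() {
          @Override
          public void onSuccess(String result) {
            // FutureCallback declares the parameter @Nullable, so check for null
            // rather than tightening the contract with @Nonnull.
            if (result == null) {
              System.out.println("unexpected null check result");
            } else {
              System.out.println("check result: " + result);
            }
          }

          @Override
          public void onFailure(Throwable t) {
            System.out.println("check failed: " + t);
          }
        }, MoreExecutors.directExecutor());
      }
    }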
@@ -166,7 +166,7 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
       Checkable<K, V> target, ListenableFuture<V> lf) {
     Futures.addCallback(lf, new FutureCallback<V>() {
       @Override
-      public void onSuccess(@Nullable V result) {
+      public void onSuccess(V result) {
         synchronized (ThrottledAsyncChecker.this) {
           checksInProgress.remove(target);
           completedChecks.put(target, new LastCheckResult<>(
@@ -1238,7 +1238,7 @@ public class FSEditLogLoader {
       holder = new Holder<Integer>(1);
       opCounts.put(opCode, holder);
     } else {
-      holder.held++;
+      holder.held = holder.held + 1;
     }
     counter.increment();
   }
@@ -533,5 +533,17 @@
     <Class name="org.apache.hadoop.mapreduce.v2.hs.CachedHistoryStorage$1" />
     <Bug pattern="SE_BAD_FIELD_INNER_CLASS" />
   </Match>
 
+  <!--
+    HADOOP-17138: Suppress warnings about unchecked Nullable
+    since the method catches NullPointerException then registerError.
+  -->
+  <Match>
+    <Or>
+      <Class name="org.apache.hadoop.mapred.LocatedFileStatusFetcher$ProcessInputDirCallback" />
+      <Class name="org.apache.hadoop.mapred.LocatedFileStatusFetcher$ProcessInitialInputPathCallback" />
+    </Or>
+    <Method name="onSuccess" />
+    <Bug pattern="NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE" />
+  </Match>
 </FindBugsFilter>
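
The comment in the new filter entry above notes that these onSuccess callbacks deal with a null argument by catching NullPointerException and registering the error, so the Nullable warning is suppressed in the filter rather than rewritten in code. A loose illustration of that pattern (the classes below are invented stand-ins, not the real LocatedFileStatusFetcher callbacks):

    import com.google.common.util.concurrent.FutureCallback;

    class ErrorRegistry {
      void registerError(Throwable t) {
        System.err.println("registered: " + t);
      }
    }

    class ProcessResultCallback implements FutureCallback<String> {
      private final ErrorRegistry registry = new ErrorRegistry();

      @Override
      public void onSuccess(String result) {
        try {
          // A null result surfaces here as a NullPointerException...
          System.out.println("result length = " + result.length());
        } catch (NullPointerException e) {
          // ...and is recorded rather than propagated, which is why the
          // warning is suppressed instead of adding a null check.
          registry.registerError(e);
        }
      }

      @Override
      public void onFailure(Throwable t) {
        registry.registerError(t);
      }
    }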
@@ -813,7 +813,7 @@ public class SLSRunner extends Configured implements Tool {
     if (appNum == null) {
       appNum = 1;
     } else {
-      appNum++;
+      appNum = appNum + 1;
     }
 
     queueAppNumMap.put(queueName, appNum);
@@ -705,4 +705,10 @@
     <Method name="getDevices" />
     <Bug pattern="DMI_HARDCODED_ABSOLUTE_FILENAME" />
   </Match>
+
+  <!-- Suppress warning about anonymous class for mocking. -->
+  <Match>
+    <Class name="~org\.apache\.hadoop\.yarn\.server\.timelineservice\.reader\.TestTimelineReaderWebServicesHBaseStorage.*" />
+    <Bug pattern="UMAC_UNCALLABLE_METHOD_OF_ANONYMOUS_CLASS" />
+  </Match>
 </FindBugsFilter>
@@ -181,14 +181,13 @@ public class TestTimelineReaderHBaseDown {
     }
   }
 
-  private static void checkQuery(HBaseTimelineReaderImpl htr) throws
-      IOException {
+  private static Set<TimelineEntity> checkQuery(HBaseTimelineReaderImpl htr)
+      throws IOException {
     TimelineReaderContext context =
         new TimelineReaderContext(YarnConfiguration.DEFAULT_RM_CLUSTER_ID,
             null, null, null, null, TimelineEntityType
             .YARN_FLOW_ACTIVITY.toString(), null, null);
-    Set<TimelineEntity> entities = htr.getEntities(context, MONITOR_FILTERS,
-        DATA_TO_RETRIEVE);
+    return htr.getEntities(context, MONITOR_FILTERS, DATA_TO_RETRIEVE);
   }
 
   private static void configure(HBaseTestingUtility util) {
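
The checkQuery change above stops storing the query result in a local that is never read and returns it to the caller instead, the kind of ignored-result assignment the upgraded SpotBugs complains about. A trimmed-down sketch of the before/after shape (query() is a stand-in for htr.getEntities(...), not the HBase reader API):

    import java.util.Collections;
    import java.util.Set;

    public class QuerySketch {
      static Set<String> query() {
        return Collections.singleton("flow-activity");
      }

      // Old shape: the local 'entities' is written but never read.
      static void checkQueryIgnoringResult() {
        Set<String> entities = query();
      }

      // New shape: hand the result back so callers can use or assert on it.
      static Set<String> checkQueryReturningResult() {
        return query();
      }

      public static void main(String[] args) {
        checkQueryIgnoringResult();
        System.out.println(checkQueryReturningResult()); // prints [flow-activity]
      }
    }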