diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index 465a59367ca..f628841cb4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -148,7 +148,7 @@ public class SplitLogManager { return server.getCoordinatedStateManager().getSplitLogManagerCoordination(); } - private FileStatus[] getFileList(List<Path> logDirs, PathFilter filter) throws IOException { + private List<FileStatus> getFileList(List<Path> logDirs, PathFilter filter) throws IOException { return getFileList(conf, logDirs, filter); } @@ -163,7 +163,7 @@ public class SplitLogManager { * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem, * Configuration, org.apache.hadoop.hbase.wal.WALFactory)} for tests. */ - public static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs, + public static List<FileStatus> getFileList(final Configuration conf, final List<Path> logDirs, final PathFilter filter) throws IOException { List<FileStatus> fileStatus = new ArrayList<>(); @@ -180,8 +180,8 @@ public class SplitLogManager { Collections.addAll(fileStatus, logfiles); } } - FileStatus[] a = new FileStatus[fileStatus.size()]; - return fileStatus.toArray(a); + + return fileStatus; } /** @@ -239,11 +239,11 @@ public class SplitLogManager { long totalSize = 0; TaskBatch batch = null; long startTime = 0; - FileStatus[] logfiles = getFileList(logDirs, filter); - if (logfiles.length != 0) { + List<FileStatus> logfiles = getFileList(logDirs, filter); + if (!logfiles.isEmpty()) { status.setStatus("Checking directory contents..."); SplitLogCounters.tot_mgr_log_split_batch_start.increment(); - LOG.info("Started splitting " + logfiles.length + " logs in " + logDirs + + LOG.info("Started splitting " + logfiles.size() + " logs in " + logDirs + " for " + serverNames); startTime = 
EnvironmentEdgeManager.currentTime(); batch = new TaskBatch(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index 48c19c24ed0..aa91c84cb67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WALSplitUtil; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -107,10 +106,10 @@ public class SplitWALManager { public List<FileStatus> getWALsToSplit(ServerName serverName, boolean splitMeta) throws IOException { List<Path> logDirs = master.getMasterWalManager().getLogDirs(Collections.singleton(serverName)); - FileStatus[] fileStatuses = - SplitLogManager.getFileList(this.conf, logDirs, splitMeta ? META_FILTER : NON_META_FILTER); - LOG.info("{} WAL count={}, meta={}", serverName, fileStatuses.length, splitMeta); - return Lists.newArrayList(fileStatuses); + List<FileStatus> fileStatuses = + SplitLogManager.getFileList(this.conf, logDirs, splitMeta ? 
META_FILTER : NON_META_FILTER); + LOG.info("{} WAL count={}, meta={}", serverName, fileStatuses.size(), splitMeta); + return fileStatuses; } private Path getWALSplitDir(ServerName serverName) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 6616a116b8b..ed684868cdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -29,7 +29,6 @@ import java.util.Map; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; -import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -217,17 +216,16 @@ public class WALSplitter { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walRootDir, walFS, rootDir, rootFS); - final FileStatus[] wals = + final List<FileStatus> wals = SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null); List<Path> splits = new ArrayList<>(); - if (ArrayUtils.isNotEmpty(wals)) { + if (!wals.isEmpty()) { for (FileStatus wal: wals) { SplitWALResult splitWALResult = splitter.splitWAL(wal, null); if (splitWALResult.isFinished()) { WALSplitUtil.archive(wal.getPath(), splitWALResult.isCorrupt(), archiveDir, walFS, conf); - if (splitter.outputSink.splits != null) { - splits.addAll(splitter.outputSink.splits); - } + // splitter.outputSink.splits is marked as final, so no null check is needed + splits.addAll(splitter.outputSink.splits); } } }