HBASE-7107 Snapshot References Utils (FileSystem Visitor) (Matteo Bertozzi)

git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-7290@1445778 13f79535-47bb-0310-9956-ffa450edef68
Author: Jonathan Hsieh 2013-02-13 18:01:03 +00:00
parent 60e69c2410
commit be33f38ca5
5 changed files with 696 additions and 1 deletion

org/apache/hadoop/hbase/HColumnDescriptor.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
@@ -420,7 +421,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* @throws IllegalArgumentException If not null and not a legitimate family
* name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
* <code>b</code> can be null when deserializing). Cannot start with a '.'
* either. Also Family can not be an empty value.
* either. Also Family can not be an empty value or equal "recovered.edits".
*/
public static byte [] isLegalFamilyName(final byte [] b) {
if (b == null) {
@@ -438,6 +439,11 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
Bytes.toString(b));
}
}
byte[] recoveredEdit = Bytes.toBytes(HLog.RECOVERED_EDITS_DIR);
if (Bytes.equals(recoveredEdit, b)) {
throw new IllegalArgumentException("Family name cannot be: " +
HLog.RECOVERED_EDITS_DIR);
}
return b;
}
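
In effect, the change makes the recovered-edits directory name an illegal column family. A minimal sketch of the new validation behavior, assuming HColumnDescriptor and Bytes are on the classpath (the "f1" name is illustrative):

// Regular family names still pass through unchanged.
byte[] valid = HColumnDescriptor.isLegalFamilyName(Bytes.toBytes("f1"));
// A family named after the recovered-edits directory is now rejected.
try {
  HColumnDescriptor.isLegalFamilyName(Bytes.toBytes("recovered.edits"));
} catch (IllegalArgumentException expected) {
  // thrown because the name equals HLog.RECOVERED_EDITS_DIR
}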

org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java (new file)

@@ -0,0 +1,248 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
import java.util.HashSet;
import java.util.TreeMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.FSVisitor;
/**
* Utility methods for interacting with the files referenced by a snapshot.
*/
@InterfaceAudience.Private
public final class SnapshotReferenceUtil {
public interface FileVisitor extends FSVisitor.StoreFileVisitor,
FSVisitor.RecoveredEditsVisitor, FSVisitor.LogFileVisitor {
}
private SnapshotReferenceUtil() {
// private constructor for utility class
}
/**
* Get log directory for a server in a snapshot.
*
* @param snapshotDir directory where the specific snapshot is stored
* @param serverName name of the parent regionserver for the log files
* @return path to the server's log directory inside the snapshot.
*/
public static Path getLogsDir(Path snapshotDir, String serverName) {
return new Path(snapshotDir, HLogUtil.getHLogDirectoryName(serverName));
}
/**
* Get the snapshotted recovered.edits dir for the specified region.
*
* @param snapshotDir directory where the specific snapshot is stored
* @param regionName name of the region
* @return path to the recovered.edits directory for the specified region.
*/
public static Path getRecoveredEditsDir(Path snapshotDir, String regionName) {
return HLogUtil.getRegionDirRecoveredEditsDir(new Path(snapshotDir, regionName));
}
/**
* Get the snapshotted recovered.edits file for the specified region.
*
* @param snapshotDir directory where the specific snapshot is stored
* @param regionName name of the region
* @param logfile name of the edit file
* @return full path of the recovered.edits file for the specified region.
*/
public static Path getRecoveredEdits(Path snapshotDir, String regionName, String logfile) {
return new Path(getRecoveredEditsDir(snapshotDir, regionName), logfile);
}
/**
* Iterate over the snapshot store files, recovered.edits, and logs
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @param visitor callback object to get the referenced files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitReferencedFiles(final FileSystem fs, final Path snapshotDir,
final FileVisitor visitor) throws IOException {
visitTableStoreFiles(fs, snapshotDir, visitor);
visitRecoveredEdits(fs, snapshotDir, visitor);
visitLogFiles(fs, snapshotDir, visitor);
}
/**
* Iterate over the snapshot store files
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitTableStoreFiles(final FileSystem fs, final Path snapshotDir,
final FSVisitor.StoreFileVisitor visitor) throws IOException {
FSVisitor.visitTableStoreFiles(fs, snapshotDir, visitor);
}
/**
* Iterate over the snapshot store files in the specified region
*
* @param fs {@link FileSystem}
* @param regionDir {@link Path} to the Snapshot region directory
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir,
final FSVisitor.StoreFileVisitor visitor) throws IOException {
FSVisitor.visitRegionStoreFiles(fs, regionDir, visitor);
}
/**
* Iterate over the snapshot recovered.edits
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @param visitor callback object to get the recovered.edits files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitRecoveredEdits(final FileSystem fs, final Path snapshotDir,
final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
FSVisitor.visitTableRecoveredEdits(fs, snapshotDir, visitor);
}
/**
* Iterate over the snapshot log files
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @param visitor callback object to get the log files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitLogFiles(final FileSystem fs, final Path snapshotDir,
final FSVisitor.LogFileVisitor visitor) throws IOException {
FSVisitor.visitLogFiles(fs, snapshotDir, visitor);
}
/**
* Returns the set of region names available in the snapshot.
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @throws IOException if an error occurred while scanning the directory
* @return the set of the regions contained in the snapshot
*/
public static Set<String> getSnapshotRegionNames(final FileSystem fs, final Path snapshotDir)
throws IOException {
FileStatus[] regionDirs = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
if (regionDirs == null) return null;
Set<String> regions = new HashSet<String>();
for (FileStatus regionDir: regionDirs) {
regions.add(regionDir.getPath().getName());
}
return regions;
}
/**
* Get the list of hfiles for the specified snapshot region.
* NOTE: The current implementation keeps one empty file per HFile in the region.
* The file name matches the one in the original table, and by reconstructing
* the path you can quickly jump to the referenced file.
*
* @param fs {@link FileSystem}
* @param snapshotRegionDir {@link Path} to the Snapshot region directory
* @return Map of hfiles per family, the key is the family name and values are hfile names
* @throws IOException if an error occurred while scanning the directory
*/
public static Map<String, List<String>> getRegionHFileReferences(final FileSystem fs,
final Path snapshotRegionDir) throws IOException {
final Map<String, List<String>> familyFiles = new TreeMap<String, List<String>>();
visitRegionStoreFiles(fs, snapshotRegionDir,
new FSVisitor.StoreFileVisitor() {
public void storeFile (final String region, final String family, final String hfile)
throws IOException {
List<String> hfiles = familyFiles.get(family);
if (hfiles == null) {
hfiles = new LinkedList<String>();
familyFiles.put(family, hfiles);
}
hfiles.add(hfile);
}
});
return familyFiles;
}
/**
* Returns the store file names in the snapshot.
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @throws IOException if an error occurred while scanning the directory
* @return the names of hfiles in the specified snapshot
*/
public static Set<String> getHFileNames(final FileSystem fs, final Path snapshotDir)
throws IOException {
final Set<String> names = new HashSet<String>();
visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
public void storeFile (final String region, final String family, final String hfile)
throws IOException {
names.add(hfile);
}
});
return names;
}
/**
* Returns the log file names available in the snapshot.
*
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
* @throws IOException if an error occurred while scanning the directory
* @return the names of hlogs in the specified snapshot
*/
public static Set<String> getHLogNames(final FileSystem fs, final Path snapshotDir)
throws IOException {
final Set<String> names = new HashSet<String>();
visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() {
public void logFile (final String server, final String logfile) throws IOException {
names.add(logfile);
}
});
return names;
}
}
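
A usage sketch for the new utility: enumerate everything a snapshot references through a single FileVisitor. Here fs and snapshotDir are placeholders for an already-resolved FileSystem and snapshot directory Path:

SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir,
  new SnapshotReferenceUtil.FileVisitor() {
    public void storeFile(String region, String family, String hfile) {
      // one callback per referenced hfile
      System.out.println("hfile: " + region + "/" + family + "/" + hfile);
    }
    public void recoveredEdits(String region, String logfile) {
      // one callback per non-empty recovered.edits file
      System.out.println("recovered.edits: " + region + "/" + logfile);
    }
    public void logFile(String server, String logfile) {
      // one callback per snapshotted hlog
      System.out.println("hlog: " + server + "/" + logfile);
    }
  });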

org/apache/hadoop/hbase/util/FSUtils.java

@@ -921,6 +921,27 @@ public abstract class FSUtils {
return true;
}
/**
* A {@link PathFilter} that returns only regular files.
*/
public static class FileFilter implements PathFilter {
private final FileSystem fs;
public FileFilter(final FileSystem fs) {
this.fs = fs;
}
@Override
public boolean accept(Path p) {
try {
return fs.isFile(p);
} catch (IOException e) {
LOG.debug("unable to verify if path=" + p + " is a regular file", e);
return false;
}
}
}
/**
* A {@link PathFilter} that returns directories.
*/
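
A sketch of how the new FileFilter composes with FSUtils.listStatus to pick up only the regular files directly under a directory (fs and dir are placeholders). Note that listStatus returns null rather than an empty array when nothing matches:

FileStatus[] files = FSUtils.listStatus(fs, dir, new FSUtils.FileFilter(fs));
if (files != null) {
  for (FileStatus file: files) {
    // subdirectories are filtered out by FileFilter
    System.out.println(file.getPath());
  }
}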

org/apache/hadoop/hbase/util/FSVisitor.java (new file)

@@ -0,0 +1,194 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.io.IOException;
import java.util.NavigableSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.FSUtils;
/**
* Utility methods for interacting with the hbase.root file system.
*/
@InterfaceAudience.Private
public final class FSVisitor {
private static final Log LOG = LogFactory.getLog(FSVisitor.class);
public interface StoreFileVisitor {
void storeFile(final String region, final String family, final String hfileName)
throws IOException;
}
public interface RecoveredEditsVisitor {
void recoveredEdits (final String region, final String logfile)
throws IOException;
}
public interface LogFileVisitor {
void logFile (final String server, final String logfile)
throws IOException;
}
private FSVisitor() {
// private constructor for utility class
}
/**
* Iterate over the table store files
*
* @param fs {@link FileSystem}
* @param tableDir {@link Path} to the table directory
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir,
final StoreFileVisitor visitor) throws IOException {
FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
if (regions == null) {
LOG.info("No regions under directory:" + tableDir);
return;
}
for (FileStatus region: regions) {
visitRegionStoreFiles(fs, region.getPath(), visitor);
}
}
/**
* Iterate over the region store files
*
* @param fs {@link FileSystem}
* @param regionDir {@link Path} to the region directory
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir,
final StoreFileVisitor visitor) throws IOException {
FileStatus[] families = FSUtils.listStatus(fs, regionDir, new FSUtils.FamilyDirFilter(fs));
if (families == null) {
LOG.info("No families under region directory:" + regionDir);
return;
}
PathFilter fileFilter = new FSUtils.FileFilter(fs);
for (FileStatus family: families) {
Path familyDir = family.getPath();
String familyName = familyDir.getName();
// get all the storeFiles in the family
FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir, fileFilter);
if (storeFiles == null) {
LOG.debug("No hfiles found for family: " + familyDir + ", skipping.");
continue;
}
for (FileStatus hfile: storeFiles) {
Path hfilePath = hfile.getPath();
visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName());
}
}
}
/**
* Iterate over each region in the table and inform about its recovered.edits files
*
* @param fs {@link FileSystem}
* @param tableDir {@link Path} to the table directory
* @param visitor callback object to get the recovered.edits files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir,
final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
if (regions == null) {
LOG.info("No regions under directory:" + tableDir);
return;
}
for (FileStatus region: regions) {
visitRegionRecoveredEdits(fs, region.getPath(), visitor);
}
}
/**
* Iterate over recovered.edits of the specified region
*
* @param fs {@link FileSystem}
* @param regionDir {@link Path} to the Region directory
* @param visitor callback object to get the recovered.edits files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitRegionRecoveredEdits(final FileSystem fs, final Path regionDir,
final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regionDir);
if (files == null || files.size() == 0) return;
for (Path source: files) {
// check to see if the file is zero length, in which case we can skip it
FileStatus stat = fs.getFileStatus(source);
if (stat.getLen() <= 0) continue;
visitor.recoveredEdits(regionDir.getName(), source.getName());
}
}
/**
* Iterate over hbase log files
*
* @param fs {@link FileSystem}
* @param rootDir {@link Path} to the HBase root folder
* @param visitor callback object to get the log files
* @throws IOException if an error occurred while scanning the directory
*/
public static void visitLogFiles(final FileSystem fs, final Path rootDir,
final LogFileVisitor visitor) throws IOException {
Path logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
FileStatus[] logServerDirs = FSUtils.listStatus(fs, logsDir);
if (logServerDirs == null) {
LOG.info("No logs under directory:" + logsDir);
return;
}
for (FileStatus serverLogs: logServerDirs) {
String serverName = serverLogs.getPath().getName();
FileStatus[] hlogs = FSUtils.listStatus(fs, serverLogs.getPath());
if (hlogs == null) {
LOG.debug("No hfiles found for server: " + serverName + ", skipping.");
continue;
}
for (FileStatus hlogRef: hlogs) {
visitor.logFile(serverName, hlogRef.getPath().getName());
}
}
}
}
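
A sketch of the visitor in isolation, counting store files per family for a table directory. Again fs and tableDir are placeholders, and java.util.Map/TreeMap imports are assumed:

final Map<String, Integer> filesPerFamily = new TreeMap<String, Integer>();
FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
  public void storeFile(String region, String family, String hfileName) {
    // accumulate one count per family across all regions
    Integer count = filesPerFamily.get(family);
    filesPerFamily.put(family, count == null ? 1 : count + 1);
  }
});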

org/apache/hadoop/hbase/util/TestFSVisitor.java (new file)

@@ -0,0 +1,226 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.UUID;
import java.util.Set;
import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.*;
import org.junit.experimental.categories.Category;
/**
* Test {@link FSVisitor}.
*/
@Category(MediumTests.class)
public class TestFSVisitor {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final String TABLE_NAME = "testtb";
private Set<String> tableFamilies;
private Set<String> tableRegions;
private Set<String> recoveredEdits;
private Set<String> tableHFiles;
private Set<String> regionServers;
private Set<String> serverLogs;
private FileSystem fs;
private Path tableDir;
private Path logsDir;
private Path rootDir;
@Before
public void setUp() throws Exception {
fs = FileSystem.get(TEST_UTIL.getConfiguration());
rootDir = TEST_UTIL.getDataTestDir("hbase");
logsDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
tableFamilies = new HashSet<String>();
tableRegions = new HashSet<String>();
recoveredEdits = new HashSet<String>();
tableHFiles = new HashSet<String>();
regionServers = new HashSet<String>();
serverLogs = new HashSet<String>();
tableDir = createTableFiles(rootDir, TABLE_NAME, tableRegions, tableFamilies, tableHFiles);
createRecoverEdits(tableDir, tableRegions, recoveredEdits);
createLogs(logsDir, regionServers, serverLogs);
FSUtils.logFileSystemState(fs, rootDir, LOG);
}
@After
public void tearDown() throws Exception {
fs.delete(rootDir, true);
}
@Test
public void testVisitStoreFiles() throws IOException {
final Set<String> regions = new HashSet<String>();
final Set<String> families = new HashSet<String>();
final Set<String> hfiles = new HashSet<String>();
FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
public void storeFile(final String region, final String family, final String hfileName)
throws IOException {
regions.add(region);
families.add(family);
hfiles.add(hfileName);
}
});
assertEquals(tableRegions, regions);
assertEquals(tableFamilies, families);
assertEquals(tableHFiles, hfiles);
}
@Test
public void testVisitRecoveredEdits() throws IOException {
final Set<String> regions = new HashSet<String>();
final Set<String> edits = new HashSet<String>();
FSVisitor.visitTableRecoveredEdits(fs, tableDir, new FSVisitor.RecoveredEditsVisitor() {
public void recoveredEdits (final String region, final String logfile)
throws IOException {
regions.add(region);
edits.add(logfile);
}
});
assertEquals(tableRegions, regions);
assertEquals(recoveredEdits, edits);
}
@Test
public void testVisitLogFiles() throws IOException {
final Set<String> servers = new HashSet<String>();
final Set<String> logs = new HashSet<String>();
FSVisitor.visitLogFiles(fs, rootDir, new FSVisitor.LogFileVisitor() {
public void logFile (final String server, final String logfile) throws IOException {
servers.add(server);
logs.add(logfile);
}
});
assertEquals(regionServers, servers);
assertEquals(serverLogs, logs);
}
/*
* |-testtb/
* |----f1d3ff8443297732862df21dc4e57262/
* |-------f1/
* |----------d0be84935ba84b66b1e866752ec5d663
* |----------9fc9d481718f4878b29aad0a597ecb94
* |-------f2/
* |----------4b0fe6068c564737946bcf4fd4ab8ae1
*/
private Path createTableFiles(final Path rootDir, final String tableName,
final Set<String> tableRegions, final Set<String> tableFamilies,
final Set<String> tableHFiles) throws IOException {
Path tableDir = new Path(rootDir, tableName);
for (int r = 0; r < 10; ++r) {
String regionName = MD5Hash.getMD5AsHex(Bytes.toBytes(r));
tableRegions.add(regionName);
Path regionDir = new Path(tableDir, regionName);
for (int f = 0; f < 3; ++f) {
String familyName = "f" + f;
tableFamilies.add(familyName);
Path familyDir = new Path(regionDir, familyName);
fs.mkdirs(familyDir);
for (int h = 0; h < 5; ++h) {
String hfileName = UUID.randomUUID().toString().replaceAll("-", "");
tableHFiles.add(hfileName);
fs.createNewFile(new Path(familyDir, hfileName));
}
}
}
return tableDir;
}
/*
* |-testtb/
* |----f1d3ff8443297732862df21dc4e57262/
* |-------recovered.edits/
* |----------0000001351969633479
* |----------0000001351969633481
*/
private void createRecoverEdits(final Path tableDir, final Set<String> tableRegions,
final Set<String> recoverEdits) throws IOException {
for (String region: tableRegions) {
Path regionEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(new Path(tableDir, region));
long seqId = System.currentTimeMillis();
for (int i = 0; i < 3; ++i) {
String editName = String.format("%019d", seqId + i);
recoverEdits.add(editName);
FSDataOutputStream stream = fs.create(new Path(regionEditsDir, editName));
stream.write(Bytes.toBytes("test"));
stream.close();
}
}
}
/*
* |-.logs/
* |----server5,5,1351969633508/
* |-------server5,5,1351969633508.0
* |----server6,6,1351969633512/
* |-------server6,6,1351969633512.0
* |-------server6,6,1351969633512.3
*/
private void createLogs(final Path logDir, final Set<String> servers,
final Set<String> logs) throws IOException {
for (int s = 0; s < 7; ++s) {
String server = String.format("server%d,%d,%d", s, s, System.currentTimeMillis());
servers.add(server);
Path serverLogDir = new Path(logDir, server);
fs.mkdirs(serverLogDir);
for (int i = 0; i < 5; ++i) {
String logfile = server + '.' + i;
logs.add(logfile);
FSDataOutputStream stream = fs.create(new Path(serverLogDir, logfile));
stream.write(Bytes.toBytes("test"));
stream.close();
}
}
}
}