HBASE-4175 Fix FSUtils.createTableDescriptor()

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1159375 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Zhihong Yu 2011-08-18 19:41:47 +00:00
parent 9c8d60237f
commit 284c7a38a8
4 changed files with 167 additions and 33 deletions

View File

@ -202,6 +202,7 @@ Release 0.91.0 - Unreleased
HBASE-2399 Forced splits only act on the first family in a table (Ming Ma) HBASE-2399 Forced splits only act on the first family in a table (Ming Ma)
HBASE-4211 Do init-sizing of the StringBuilder making a ServerName HBASE-4211 Do init-sizing of the StringBuilder making a ServerName
(Benoît Sigoure) (Benoît Sigoure)
HBASE-4175 Fix FSUtils.createTableDescriptor() (Ramkrishna)
IMPROVEMENTS IMPROVEMENTS
HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack) HBASE-3290 Max Compaction Size (Nicolas Spiegelberg via Stack)

View File

@ -410,11 +410,13 @@ public class MasterFileSystem {
return tableInfoPath; return tableInfoPath;
} }
/** /**
* Create new HTableDescriptor in HDFS. * Create new HTableDescriptor in HDFS.
*
* @param htableDescriptor * @param htableDescriptor
*/ */
public void createTableDescriptor(HTableDescriptor htableDescriptor) { public void createTableDescriptor(HTableDescriptor htableDescriptor)
throws IOException {
FSUtils.createTableDescriptor(htableDescriptor, conf); FSUtils.createTableDescriptor(htableDescriptor, conf);
} }

View File

@ -938,42 +938,72 @@ public abstract class FSUtils {
} }
/** /**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. * Create new HTableDescriptor in HDFS. Happens when we are creating table.
/** *
* @param htableDescriptor * @param htableDescriptor
* @param conf * @param conf
*/ */
public static void createTableDescriptor(HTableDescriptor htableDescriptor, public static boolean createTableDescriptor(
Configuration conf) { HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
try { return createTableDescriptor(htableDescriptor, conf, false);
FileSystem fs = getCurrentFileSystem(conf);
createTableDescriptor(fs, getRootDir(conf), htableDescriptor);
} catch(IOException ioe) {
LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
}
} }
/** /**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param htableDescriptor
* @param conf
* @param forceCreation
*/
public static boolean createTableDescriptor(
HTableDescriptor htableDescriptor, Configuration conf,
boolean forceCreation) throws IOException {
FileSystem fs = getCurrentFileSystem(conf);
return createTableDescriptor(fs, getRootDir(conf), htableDescriptor,
forceCreation);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
*
* @param fs * @param fs
* @param htableDescriptor * @param htableDescriptor
* @param rootdir * @param rootdir
*/ */
public static void createTableDescriptor(FileSystem fs, public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
Path rootdir, HTableDescriptor htableDescriptor) { HTableDescriptor htableDescriptor) throws IOException {
try { return createTableDescriptor(fs, rootdir, htableDescriptor, false);
Path tableInfoPath = }
getTableInfoPath(rootdir, htableDescriptor.getNameAsString());
LOG.info("Current tableInfoPath = " + tableInfoPath) ; /**
if (fs.exists(tableInfoPath) && * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
fs.getFileStatus(tableInfoPath).getLen() > 0) { * forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param fs
* @param htableDescriptor
* @param rootdir
* @param forceCreation
*/
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor, boolean forceCreation)
throws IOException {
Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor
.getNameAsString());
LOG.info("Current tableInfoPath = " + tableInfoPath);
if (!forceCreation) {
if (fs.exists(tableInfoPath)
&& fs.getFileStatus(tableInfoPath).getLen() > 0) {
LOG.info("TableInfo already exists.. Skipping creation"); LOG.info("TableInfo already exists.. Skipping creation");
return; return false;
} }
writeTableDescriptor(fs, htableDescriptor,
getTablePath(rootdir, htableDescriptor.getNameAsString()));
} catch(IOException ioe) {
LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
} }
writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir,
htableDescriptor.getNameAsString()), forceCreation);
return true;
} }
/** /**
@ -990,25 +1020,42 @@ public abstract class FSUtils {
/** /**
* Called when we are creating a table to write out the tables' descriptor. * Called when we are creating a table to write out the tables' descriptor.
*
* @param fs * @param fs
* @param hTableDescriptor * @param hTableDescriptor
* @param tableDir * @param tableDir
* @throws IOException * @throws IOException
*/ */
private static void writeTableDescriptor(FileSystem fs, private static void writeTableDescriptor(FileSystem fs,
HTableDescriptor hTableDescriptor, Path tableDir) HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
throws IOException { throws IOException {
// Create in tmpdir and then move into place in case we crash after // Create in tmpdir and then move into place in case we crash after
// create but before close. If we don't successfully close the file, // create but before close. If we don't successfully close the file,
// subsequent region reopens will fail the below because create is // subsequent region reopens will fail the below because create is
// registered in NN. // registered in NN.
Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME); Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
Path tmpPath = new Path(new Path(tableDir,".tmp"), HConstants.TABLEINFO_NAME); Path tmpPath = new Path(new Path(tableDir, ".tmp"),
HConstants.TABLEINFO_NAME);
LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath); LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
writeHTD(fs, tmpPath, hTableDescriptor); try {
writeHTD(fs, tmpPath, hTableDescriptor);
} catch (IOException e) {
LOG.error("Unable to write the tabledescriptor in the path" + tmpPath
+ ".", e);
throw e;
}
if (forceCreation) {
if (!fs.delete(tableInfoPath, false)) {
String errMsg = "Unable to delete " + tableInfoPath
+ " while forcefully writing the table descriptor.";
LOG.error(errMsg);
throw new IOException(errMsg);
}
}
if (!fs.rename(tmpPath, tableInfoPath)) { if (!fs.rename(tmpPath, tableInfoPath)) {
throw new IOException("Unable to rename " + tmpPath + " to " + String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
tableInfoPath); LOG.error(errMsg);
throw new IOException(errMsg);
} else { } else {
LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath); LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
} }

View File

@ -0,0 +1,84 @@
/**
* Copyright 2011 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * Tests for {@code FSUtils.createTableDescriptor(FileSystem, Path,
 * HTableDescriptor, boolean)}: the {@code forceCreation} flag must control
 * whether an already-present table descriptor is overwritten.
 *
 * Runs against a mini DFS cluster shared by all tests in this class.
 */
public class TestFSTableDescriptorForceCreation {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    UTIL.startMiniDFSCluster(1);
  }

  @AfterClass
  public static void shutDownCluster() throws Exception {
    UTIL.shutdownMiniDFSCluster();
  }

  /**
   * Returns a per-test root directory under the filesystem working directory,
   * removing any detritus left behind by a previous run so each test starts
   * from a clean slate.
   */
  private static Path cleanRootDir(FileSystem fs, String name)
      throws IOException {
    Path rootdir = new Path(fs.getWorkingDirectory(), name);
    // Recursive delete; ignore the return value -- false just means the
    // directory did not exist, which is the state we want anyway.
    fs.delete(rootdir, true);
    return rootdir;
  }

  @Test
  public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
      throws IOException {
    final String name = "newTable2";
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    Path rootdir = cleanRootDir(fs, name);
    HTableDescriptor htd = new HTableDescriptor(name);
    // No descriptor exists yet, so creation must succeed even without force.
    assertTrue("Should create new table descriptor", FSUtils
        .createTableDescriptor(fs, rootdir, htd, false));
  }

  @Test
  public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
      throws IOException {
    final String name = "testAlreadyExists";
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    Path rootdir = cleanRootDir(fs, name);
    // Pre-create the descriptor so the non-forceful attempt below collides.
    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
    HTableDescriptor htd = new HTableDescriptor(name);
    htds.add(htd);
    assertFalse("Should not create new table descriptor", FSUtils
        .createTableDescriptor(fs, rootdir, htd, false));
  }

  @Test
  public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
      throws Exception {
    final String name = "createNewTableNew2";
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    Path rootdir = cleanRootDir(fs, name);
    HTableDescriptor htd = new HTableDescriptor(name);
    // First write the descriptor; the forced second write must overwrite it.
    FSUtils.createTableDescriptor(fs, rootdir, htd, false);
    assertTrue("Should create new table descriptor", FSUtils
        .createTableDescriptor(fs, rootdir, htd, true));
  }
}