HBASE-10361. Enable/AlterTable support for region replicas (ddas)

git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-10070@1570008 13f79535-47bb-0310-9956-ffa450edef68
Devaraj Das 2014-02-20 00:26:56 +00:00 committed by Enis Soztutar
parent c1f9b6193d
commit 72355a920a
7 changed files with 134 additions and 43 deletions

org/apache/hadoop/hbase/catalog/MetaEditor.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -173,6 +174,31 @@ public class MetaEditor extends MetaReader {
     }
   }

+  /**
+   * Deletes the location columns (server, seqnum, startcode) of a range of replicas
+   * from the passed hbase:meta rows.
+   * @param metaRows the hbase:meta row keys to delete replica columns from
+   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
+   * @param numReplicasToRemove the number of replicas to delete columns for
+   * @param ct the CatalogTracker to use for the meta edits
+   * @throws IOException
+   */
+  public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows, int replicaIndexToDeleteFrom,
+      int numReplicasToRemove, CatalogTracker ct) throws IOException {
+    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
+    for (byte[] row : metaRows) {
+      Delete deleteReplicaLocations = new Delete(row);
+      for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
+        deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
+          MetaReader.getServerColumn(i));
+        deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
+          MetaReader.getSeqNumColumn(i));
+        deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
+          MetaReader.getStartCodeColumn(i));
+      }
+      deleteFromMetaTable(ct, deleteReplicaLocations);
+    }
+  }
+
   /**
    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
    * @param ct CatalogTracker on whose back we will ride the edit.
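
For context, a minimal sketch of how this helper is meant to be driven; the replica counts below are made-up, and the real call site is ModifyTableHandler.removeReplicaColumnsIfNeeded later in this commit:

    import java.io.IOException;
    import java.util.Set;
    import org.apache.hadoop.hbase.catalog.CatalogTracker;
    import org.apache.hadoop.hbase.catalog.MetaEditor;

    public class RemoveReplicaColumnsSketch {
      // Hypothetical driver: REGION_REPLICATION drops from 3 to 1, so the
      // location columns of replica IDs 1 and 2 are deleted from each of the
      // table's meta rows.
      static void shrinkReplicas(Set<byte[]> metaRows, CatalogTracker ct)
          throws IOException {
        int oldReplicaCount = 3; // assumed previous REGION_REPLICATION
        int newReplicaCount = 1; // assumed new REGION_REPLICATION
        MetaEditor.removeRegionReplicasFromMeta(metaRows,
            newReplicaCount,                    // first replica ID to delete: 1
            oldReplicaCount - newReplicaCount,  // delete columns for 2 replica IDs
            ct);                                // an initialized CatalogTracker
      }
    }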

org/apache/hadoop/hbase/master/RegionStates.java

@@ -446,11 +446,13 @@ public class RegionStates {
     }
     HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri);
     Set<HRegionInfo> replicas = defaultReplicaToOtherReplicas.get(defaultReplica);
+    if (replicas != null) {
       replicas.remove(hri);
       if (replicas.isEmpty()) {
         defaultReplicaToOtherReplicas.remove(defaultReplica);
       }
+    }
   }

   /**
    * A dead server's hlogs have been split so that all the regions
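
The new null check covers the case where no replicas were ever registered for that primary (e.g. a table with REGION_REPLICATION = 1). A standalone sketch of the same guarded-removal pattern, with illustrative names:

    import java.util.Map;
    import java.util.Set;

    public class GuardedIndexRemoval {
      // Remove one member from a key's set; drop the set once it empties so
      // the index does not accumulate empty entries. Keys that were never
      // registered are skipped instead of triggering a NullPointerException.
      static <K, V> void remove(Map<K, Set<V>> index, K key, V member) {
        Set<V> members = index.get(key);
        if (members == null) return;
        members.remove(member);
        if (members.isEmpty()) {
          index.remove(key);
        }
      }
    }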

org/apache/hadoop/hbase/master/handler/EnableTableHandler.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master.handler;
 import java.io.IOException;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;

@@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.GeneralBulkAssigner;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager;
@@ -61,6 +63,7 @@ public class EnableTableHandler extends EventHandler {
   private final CatalogTracker catalogTracker;
   private boolean skipTableStateCheck = false;
   private TableLock tableLock;
+  private MasterServices services;

   public EnableTableHandler(Server server, TableName tableName,
       CatalogTracker catalogTracker, AssignmentManager assignmentManager,
@@ -73,6 +76,14 @@ public class EnableTableHandler extends EventHandler {
     this.skipTableStateCheck = skipTableStateCheck;
   }

+  public EnableTableHandler(MasterServices services, TableName tableName,
+      CatalogTracker catalogTracker, AssignmentManager assignmentManager,
+      TableLockManager tableLockManager, boolean skipTableStateCheck) {
+    this((Server)services, tableName, catalogTracker, assignmentManager, tableLockManager,
+      skipTableStateCheck);
+    this.services = services;
+  }
+
   public EnableTableHandler prepare()
       throws TableNotFoundException, TableNotDisabledException, IOException {
     //acquire the table write lock, blocking
@@ -186,6 +197,16 @@ public class EnableTableHandler extends EventHandler {
     int countOfRegionsInTable = tableRegionsAndLocations.size();
     Map<HRegionInfo, ServerName> regionsToAssign =
       regionsToAssignWithServerName(tableRegionsAndLocations);
+    if (services != null) {
+      // need to potentially create some regions for the replicas
+      List<HRegionInfo> unrecordedReplicas = AssignmentManager.replicaRegionsNotRecordedInMeta(
+        new HashSet<HRegionInfo>(regionsToAssign.keySet()), services);
+      for (HRegionInfo h : unrecordedReplicas) {
+        regionsToAssign.put(h,
+          this.assignmentManager.getBalancer().randomAssignment(h,
+            serverManager.getOnlineServersList()));
+      }
+    }
     int regionsCount = regionsToAssign.size();
     if (regionsCount == 0) {
       done = true;
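
For orientation, "replica regions not recorded in meta" are replica HRegionInfos derived from each primary that have no hbase:meta row yet; this happens when REGION_REPLICATION was raised while the table was disabled. A sketch of the derivation, assuming RegionReplicaUtil.getRegionInfoForReplica from this branch (the class is already used elsewhere in this commit):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public class ReplicaDerivationSketch {
      // Derive the non-default replica HRegionInfos for one primary region,
      // assuming the table's REGION_REPLICATION is regionReplication.
      static List<HRegionInfo> replicasOf(HRegionInfo primary, int regionReplication) {
        List<HRegionInfo> replicas = new ArrayList<HRegionInfo>();
        for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
          // Same boundaries and region id as the primary; only the replica id differs.
          replicas.add(RegionReplicaUtil.getRegionInfoForReplica(primary, replicaId));
        }
        return replicas;
      }
    }

Each such replica is then handed to the balancer's randomAssignment for an initial server, as the hunk above shows.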

org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java

@@ -19,16 +19,24 @@
 package org.apache.hadoop.hbase.master.handler;

 import java.io.IOException;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -53,8 +61,12 @@ public class ModifyTableHandler extends TableEventHandler {
   @Override
   protected void prepareWithTableLock() throws IOException {
     super.prepareWithTableLock();
-    // Check table exists.
-    getTableDescriptor();
+    // Check operation is possible on the table in its current state
+    // Also checks whether the table exists
+    if (masterServices.getAssignmentManager().getZKTable().isEnabledTable(this.htd.getTableName())
+        && this.htd.getRegionReplication() != getTableDescriptor().getRegionReplication()) {
+      throw new IOException("REGION_REPLICATION change is not supported for enabled tables");
+    }
   }

   @Override
@@ -68,11 +80,35 @@ public class ModifyTableHandler extends TableEventHandler {
     HTableDescriptor oldHtd = getTableDescriptor();
     this.masterServices.getTableDescriptors().add(this.htd);
     deleteFamilyFromFS(hris, oldHtd.getFamiliesKeys());
+    removeReplicaColumnsIfNeeded(this.htd.getRegionReplication(), oldHtd.getRegionReplication(),
+      htd.getTableName());
     if (cpHost != null) {
       cpHost.postModifyTableHandler(this.tableName, this.htd);
     }
   }

+  private void removeReplicaColumnsIfNeeded(int newReplicaCount, int oldReplicaCount,
+      TableName table) throws IOException {
+    if (newReplicaCount >= oldReplicaCount) return;
+    Set<byte[]> tableRows = new HashSet<byte[]>();
+    Scan scan = MetaReader.getScanForTableName(table);
+    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+    HTable htable = null;
+    try {
+      htable = new HTable(masterServices.getConfiguration(), TableName.META_TABLE_NAME);
+      ResultScanner resScanner = htable.getScanner(scan);
+      for (Result result : resScanner) {
+        tableRows.add(result.getRow());
+      }
+      MetaEditor.removeRegionReplicasFromMeta(tableRows, newReplicaCount,
+        oldReplicaCount - newReplicaCount, masterServices.getCatalogTracker());
+    } finally {
+      if (htable != null) {
+        htable.close();
+      }
+    }
+  }
+
   /**
    * Removes from hdfs the families that are not longer present in the new table descriptor.
    */
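
For orientation, the columns this cleanup deletes live in hbase:meta's info family. A sketch of the naming scheme it relies on; the exact replica-ID suffix format ("_%04X") is an assumption here, not taken from this diff:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicaColumnSketch {
      // Replica 0 uses the plain qualifiers (info:server, info:seqnumDuringOpen,
      // info:serverstartcode); higher replica IDs get a suffixed qualifier.
      static byte[] serverColumn(int replicaId) {
        if (replicaId == 0) {
          return HConstants.SERVER_QUALIFIER; // "server"
        }
        return Bytes.toBytes(Bytes.toString(HConstants.SERVER_QUALIFIER)
            + String.format("_%04X", replicaId)); // assumed suffix format
      }
    }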

org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.master;
 import static org.junit.Assert.assertEquals;

 import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -176,44 +178,43 @@ public class TestMasterOperationsForRegionReplicas {
       // TEST_UTIL.getMiniHBaseCluster().startRegionServer();
       // }
-      //TODO: HBASE-10361 patch should uncomment the test below
-      // //check on alter table
-      // admin.disableTable(table);
-      // assert(admin.isTableDisabled(table));
-      // //increase the replica
-      // desc.setRegionReplication(numReplica + 1);
-      // admin.modifyTable(table, desc);
-      // admin.enableTable(table);
-      // assert(admin.isTableEnabled(table));
-      // List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
-      //     .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
-      // assert(regions.size() == numRegions * (numReplica + 1));
-      //
-      // //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1)
-      // admin.disableTable(table);
-      // desc.setRegionReplication(numReplica);
-      // admin.modifyTable(table, desc);
-      // admin.enableTable(table);
-      // assert(admin.isTableEnabled(table));
-      // regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
-      //     .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
-      // assert(regions.size() == numRegions * numReplica);
-      // //also make sure the meta table has the replica locations removed
-      // hris = MetaReader.getTableRegions(ct, table);
-      // assert(hris.size() == numRegions * numReplica);
-      // //just check that the number of default replica regions in the meta table are the same
-      // //as the number of regions the table was created with, and the count of the
-      // //replicas is numReplica for each region
-      // Map<HRegionInfo, Integer> defaultReplicas = new HashMap<HRegionInfo, Integer>();
-      // for (HRegionInfo hri : hris) {
-      //   Integer i;
-      //   HRegionInfo regionReplica0 = hri.getRegionInfoForReplica(0);
-      //   defaultReplicas.put(regionReplica0,
-      //       (i = defaultReplicas.get(regionReplica0)) == null ? 1 : i + 1);
-      // }
-      // assert(defaultReplicas.size() == numRegions);
-      // Collection<Integer> counts = new HashSet<Integer>(defaultReplicas.values());
-      // assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
+      //check on alter table
+      admin.disableTable(table);
+      assert(admin.isTableDisabled(table));
+      //increase the replica
+      desc.setRegionReplication(numReplica + 1);
+      admin.modifyTable(table, desc);
+      admin.enableTable(table);
+      assert(admin.isTableEnabled(table));
+      List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
+          .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
+      assert(regions.size() == numRegions * (numReplica + 1));
+
+      //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1)
+      admin.disableTable(table);
+      desc.setRegionReplication(numReplica);
+      admin.modifyTable(table, desc);
+      admin.enableTable(table);
+      assert(admin.isTableEnabled(table));
+      regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
+          .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
+      assert(regions.size() == numRegions * numReplica);
+      //also make sure the meta table has the replica locations removed
+      hris = MetaReader.getTableRegions(ct, table);
+      assert(hris.size() == numRegions * numReplica);
+      //just check that the number of default replica regions in the meta table are the same
+      //as the number of regions the table was created with, and the count of the
+      //replicas is numReplica for each region
+      Map<HRegionInfo, Integer> defaultReplicas = new HashMap<HRegionInfo, Integer>();
+      for (HRegionInfo hri : hris) {
+        Integer i;
+        HRegionInfo regionReplica0 = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri);
+        defaultReplicas.put(regionReplica0,
+            (i = defaultReplicas.get(regionReplica0)) == null ? 1 : i + 1);
+      }
+      assert(defaultReplicas.size() == numRegions);
+      Collection<Integer> counts = new HashSet<Integer>(defaultReplicas.values());
+      assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
     } finally {
       admin.disableTable(table);
       admin.deleteTable(table);

hbase/admin.rb (shell)

@@ -505,6 +505,7 @@ module Hbase
       htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
       htd.setAsyncLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH]
       htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
+      htd.setRegionReplication(JInteger.valueOf(arg.delete(REGION_REPLICATION))) if arg[REGION_REPLICATION]
       set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
       set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]

shell/commands/alter.rb (shell help)

@@ -74,6 +74,10 @@ You can also remove a table-scope attribute:

   hbase> alter 't1', METHOD => 'table_att_unset', NAME => 'coprocessor$1'

+You can also set REGION_REPLICATION:
+
+  hbase> alter 't1', {REGION_REPLICATION => 2}
+
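Note that the ModifyTableHandler change above rejects REGION_REPLICATION changes on enabled tables, so the working sequence (mirroring the updated test; 't1' is illustrative) is to disable first:

  hbase> disable 't1'
  hbase> alter 't1', {REGION_REPLICATION => 2}
  hbase> enable 't1'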
 There could be more than one alteration in one command:

   hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 },