HBASE-15607 Remove PB references from Admin for 2.0 (Ram)

Ramkrishna 2016-05-03 10:51:46 +05:30
parent d77972ff16
commit c06a976a98
45 changed files with 611 additions and 270 deletions

View File

@ -41,9 +41,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
@ -1153,7 +1150,7 @@ public interface Admin extends Abortable, Closeable {
* @return the current compaction state
* @throws IOException if a remote or network exception occurs
*/
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(final TableName tableName)
CompactionState getCompactionState(final TableName tableName)
throws IOException;
/**
@ -1164,7 +1161,7 @@ public interface Admin extends Abortable, Closeable {
* @return the current compaction state
* @throws IOException if a remote or network exception occurs
*/
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionStateForRegion(
CompactionState getCompactionStateForRegion(
final byte[] regionName) throws IOException;
/**
@ -1244,7 +1241,7 @@ public interface Admin extends Abortable, Closeable {
*/
void snapshot(final String snapshotName,
final TableName tableName,
HBaseProtos.SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
SnapshotType type) throws IOException, SnapshotCreationException,
IllegalArgumentException;
/**
@ -1265,7 +1262,7 @@ public interface Admin extends Abortable, Closeable {
* @throws SnapshotCreationException if snapshot failed to be taken
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
void snapshot(HBaseProtos.SnapshotDescription snapshot)
void snapshot(SnapshotDescription snapshot)
throws IOException, SnapshotCreationException, IllegalArgumentException;
/**
@ -1273,12 +1270,11 @@ public interface Admin extends Abortable, Closeable {
* single snapshot should be taken at a time, or results may be undefined.
*
* @param snapshot snapshot to take
* @return response from the server indicating the max time to wait for the snapshot
* @throws IOException if the snapshot did not succeed or we lose contact with the master.
* @throws SnapshotCreationException if snapshot creation failed
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
MasterProtos.SnapshotResponse takeSnapshotAsync(HBaseProtos.SnapshotDescription snapshot)
void takeSnapshotAsync(SnapshotDescription snapshot)
throws IOException, SnapshotCreationException;
/**
@ -1297,7 +1293,7 @@ public interface Admin extends Abortable, Closeable {
* @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is
* unknown
*/
boolean isSnapshotFinished(final HBaseProtos.SnapshotDescription snapshot)
boolean isSnapshotFinished(final SnapshotDescription snapshot)
throws IOException, HBaseSnapshotException, UnknownSnapshotException;
/**
@ -1470,7 +1466,7 @@ public interface Admin extends Abortable, Closeable {
* @return a list of snapshot descriptors for completed snapshots
* @throws IOException if a network error occurs
*/
List<HBaseProtos.SnapshotDescription> listSnapshots() throws IOException;
List<SnapshotDescription> listSnapshots() throws IOException;
/**
* List all the completed snapshots matching the given regular expression.
@ -1479,7 +1475,7 @@ public interface Admin extends Abortable, Closeable {
* @return - returns a List of SnapshotDescription
* @throws IOException if a remote or network exception occurs
*/
List<HBaseProtos.SnapshotDescription> listSnapshots(String regex) throws IOException;
List<SnapshotDescription> listSnapshots(String regex) throws IOException;
/**
* List all the completed snapshots matching the given pattern.
@ -1488,7 +1484,7 @@ public interface Admin extends Abortable, Closeable {
* @return - returns a List of SnapshotDescription
* @throws IOException if a remote or network exception occurs
*/
List<HBaseProtos.SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
/**
* List all the completed snapshots matching the given table name regular expression and snapshot
@ -1498,7 +1494,7 @@ public interface Admin extends Abortable, Closeable {
* @return - returns a List of completed SnapshotDescription
* @throws IOException if a remote or network exception occurs
*/
List<HBaseProtos.SnapshotDescription> listTableSnapshots(String tableNameRegex,
List<SnapshotDescription> listTableSnapshots(String tableNameRegex,
String snapshotNameRegex) throws IOException;
/**
@ -1509,7 +1505,7 @@ public interface Admin extends Abortable, Closeable {
* @return - returns a List of completed SnapshotDescription
* @throws IOException if a remote or network exception occurs
*/
List<HBaseProtos.SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern) throws IOException;
/**
@ -1651,7 +1647,7 @@ public interface Admin extends Abortable, Closeable {
* Compact a table. Asynchronous operation.
*
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.Admin.CompactType}
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException
* @throws InterruptedException
*/
@ -1663,7 +1659,7 @@ public interface Admin extends Abortable, Closeable {
*
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.Admin.CompactType}
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException if not a mob column family or if a remote or network exception occurs
* @throws InterruptedException
*/
@ -1674,7 +1670,7 @@ public interface Admin extends Abortable, Closeable {
* Major compact a table. Asynchronous operation.
*
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.Admin.CompactType}
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException
* @throws InterruptedException
*/
@ -1686,7 +1682,7 @@ public interface Admin extends Abortable, Closeable {
*
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.Admin.CompactType}
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException if not a mob column family or if a remote or network exception occurs
* @throws InterruptedException
*/
@ -1697,11 +1693,11 @@ public interface Admin extends Abortable, Closeable {
* Get the current compaction state of a table. It could be in a compaction, or none.
*
* @param tableName table to examine
* @param compactType {@link org.apache.hadoop.hbase.client.Admin.CompactType}
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @return the current compaction state
* @throws IOException if a remote or network exception occurs
*/
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(final TableName tableName,
CompactionState getCompactionState(final TableName tableName,
CompactType compactType) throws IOException;
/**
@ -1741,27 +1737,4 @@ public interface Admin extends Abortable, Closeable {
* and rollback the switch state to be original state before you change switch
* */
void releaseSplitOrMergeLockAndRollback() throws IOException;
/**
* Currently, there are only two compact types:
* {@code NORMAL} means do store files compaction;
* {@code MOB} means do mob files compaction.
* */
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum CompactType {
NORMAL (0),
MOB (1);
CompactType(int value) {}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum MasterSwitchType {
SPLIT,
MERGE
}
}
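
For callers, the visible effect of these Admin changes is that snapshot and compaction-state calls no longer reference protobuf classes. The following is a minimal usage sketch against the updated interface; the configuration, connection handling, table name, and snapshot name are illustrative assumptions, not part of this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AdminWithoutPbExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("example_table"); // assumed table name
      // SnapshotType replaces HBaseProtos.SnapshotDescription.Type in the signature.
      admin.snapshot("example_snapshot", table, SnapshotType.FLUSH);
      // The return type is now the client-side CompactionState enum, not
      // AdminProtos.GetRegionInfoResponse.CompactionState.
      CompactionState state = admin.getCompactionState(table);
      System.out.println("compaction state of " + table + ": " + state);
    }
  }
}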

View File

@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Currently, there are only two compact types:
* {@code NORMAL} means do store files compaction;
* {@code MOB} means do mob files compaction.
* */
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum CompactType {
NORMAL (0),
MOB (1);
CompactType(int value) {}
}

View File

@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* POJO representing the compaction state
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum CompactionState {
NONE, MINOR, MAJOR, MAJOR_AND_MINOR;
}
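
A short sketch of how the two relocated enums combine on the caller side; the Admin handle and table name are assumed to exist, and the getCompactionState(TableName, CompactType) overload used here is the one whose signature is reworked later in this diff.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.CompactionState;

public final class MobCompactionHelper {
  private MobCompactionHelper() {}

  /** Request a mob compaction unless one is already running. Sketch only. */
  public static boolean requestMobCompaction(Admin admin, TableName table) throws Exception {
    CompactionState state = admin.getCompactionState(table, CompactType.MOB);
    if (state != CompactionState.NONE) {
      return false; // a mob compaction is already in progress
    }
    admin.compact(table, CompactType.MOB);
    return true;
  }
}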

View File

@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
@ -80,7 +81,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionReque
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
@ -2265,7 +2264,10 @@ public class HBaseAdmin implements Admin {
PayloadCarryingRpcController controller = rpcControllerFactory.newController();
// TODO: this does not do retries, it should. Set priority and timeout in controller
GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
return response.getCompactionState();
if (response.getCompactionState() != null) {
return ProtobufUtil.createCompactionState(response.getCompactionState());
}
return null;
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
@ -2275,33 +2277,30 @@ public class HBaseAdmin implements Admin {
public void snapshot(final String snapshotName,
final TableName tableName) throws IOException,
SnapshotCreationException, IllegalArgumentException {
snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
snapshot(snapshotName, tableName, SnapshotType.FLUSH);
}
@Override
public void snapshot(final byte[] snapshotName, final TableName tableName)
throws IOException, SnapshotCreationException, IllegalArgumentException {
snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
snapshot(Bytes.toString(snapshotName), tableName, SnapshotType.FLUSH);
}
@Override
public void snapshot(final String snapshotName, final TableName tableName,
SnapshotDescription.Type type)
SnapshotType type)
throws IOException, SnapshotCreationException, IllegalArgumentException {
SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
builder.setTable(tableName.getNameAsString());
builder.setName(snapshotName);
builder.setType(type);
snapshot(builder.build());
snapshot(new SnapshotDescription(snapshotName, tableName.getNameAsString(), type));
}
@Override
public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
IllegalArgumentException {
public void snapshot(SnapshotDescription snapshotDesc)
throws IOException, SnapshotCreationException, IllegalArgumentException {
// actually take the snapshot
SnapshotResponse response = takeSnapshotAsync(snapshot);
final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
.build();
HBaseProtos.SnapshotDescription snapshot = createHBaseProtosSnapshotDesc(snapshotDesc);
SnapshotResponse response = asyncSnapshot(snapshot);
final IsSnapshotDoneRequest request =
IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build();
IsSnapshotDoneResponse done = null;
long start = EnvironmentEdgeManager.currentTime();
long max = response.getExpectedTimeout();
@ -2339,8 +2338,37 @@ public class HBaseAdmin implements Admin {
}
@Override
public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
public void takeSnapshotAsync(SnapshotDescription snapshotDesc) throws IOException,
SnapshotCreationException {
HBaseProtos.SnapshotDescription snapshot = createHBaseProtosSnapshotDesc(snapshotDesc);
asyncSnapshot(snapshot);
}
private HBaseProtos.SnapshotDescription
createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) {
HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
if (snapshotDesc.getTable() != null) {
builder.setTable(snapshotDesc.getTable());
}
if (snapshotDesc.getName() != null) {
builder.setName(snapshotDesc.getName());
}
if (snapshotDesc.getOwner() != null) {
builder.setOwner(snapshotDesc.getOwner());
}
if (snapshotDesc.getCreationTime() != -1) {
builder.setCreationTime(snapshotDesc.getCreationTime());
}
if (snapshotDesc.getVersion() != -1) {
builder.setVersion(snapshotDesc.getVersion());
}
builder.setType(ProtobufUtil.createProtosSnapShotDescType(snapshotDesc.getType()));
HBaseProtos.SnapshotDescription snapshot = builder.build();
return snapshot;
}
private SnapshotResponse asyncSnapshot(HBaseProtos.SnapshotDescription snapshot)
throws IOException {
ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
.build();
@ -2356,9 +2384,9 @@ public class HBaseAdmin implements Admin {
}
@Override
public boolean isSnapshotFinished(final SnapshotDescription snapshot)
public boolean isSnapshotFinished(final SnapshotDescription snapshotDesc)
throws IOException, HBaseSnapshotException, UnknownSnapshotException {
final HBaseProtos.SnapshotDescription snapshot = createHBaseProtosSnapshotDesc(snapshotDesc);
return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
@Override
public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
@ -2643,7 +2671,7 @@ public class HBaseAdmin implements Admin {
private Future<Void> internalRestoreSnapshotAsync(
final String snapshotName,
final TableName tableName) throws IOException, RestoreSnapshotException {
final SnapshotDescription snapshot = SnapshotDescription.newBuilder()
final HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder()
.setName(snapshotName).setTable(tableName.getNameAsString()).build();
// actually restore the snapshot
@ -2671,7 +2699,7 @@ public class HBaseAdmin implements Admin {
private static class RestoreSnapshotFuture extends TableFuture<Void> {
public RestoreSnapshotFuture(
final HBaseAdmin admin,
final SnapshotDescription snapshot,
final HBaseProtos.SnapshotDescription snapshot,
final TableName tableName,
final RestoreSnapshotResponse response) {
super(admin, tableName,
@ -2702,8 +2730,16 @@ public class HBaseAdmin implements Admin {
public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
PayloadCarryingRpcController controller = rpcControllerFactory.newController();
controller.setCallTimeout(callTimeout);
return master.getCompletedSnapshots(controller,
GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList();
List<HBaseProtos.SnapshotDescription> snapshotsList = master
.getCompletedSnapshots(controller, GetCompletedSnapshotsRequest.newBuilder().build())
.getSnapshotsList();
List<SnapshotDescription> result = new ArrayList<SnapshotDescription>(snapshotsList.size());
for (HBaseProtos.SnapshotDescription snapshot : snapshotsList) {
result.add(new SnapshotDescription(snapshot.getName(), snapshot.getTable(),
ProtobufUtil.createSnapshotType(snapshot.getType()), snapshot.getOwner(),
snapshot.getCreationTime(), snapshot.getVersion()));
}
return result;
}
});
}
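
Downstream of the conversion loop above, client code now receives the POJO list directly from listSnapshots(). A minimal sketch, assuming only an already-open Admin handle:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public final class ListSnapshotsExample {
  private ListSnapshotsExample() {}

  /** Print every completed snapshot using the POJO returned by listSnapshots(). Sketch only. */
  public static void printSnapshots(Admin admin) throws IOException {
    List<SnapshotDescription> snapshots = admin.listSnapshots();
    for (SnapshotDescription snapshot : snapshots) {
      System.out.println(snapshot.getName() + " table=" + snapshot.getTable()
          + " type=" + snapshot.getType() + " created=" + snapshot.getCreationTime());
    }
  }
}
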
@ -2765,7 +2801,9 @@ public class HBaseAdmin implements Admin {
controller.setCallTimeout(callTimeout);
master.deleteSnapshot(controller,
DeleteSnapshotRequest.newBuilder().
setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
setSnapshot(
HBaseProtos.SnapshotDescription.newBuilder().setName(snapshotName).build())
.build()
);
return null;
}
@ -2798,7 +2836,7 @@ public class HBaseAdmin implements Admin {
PayloadCarryingRpcController controller = rpcControllerFactory.newController();
controller.setCallTimeout(callTimeout);
this.master.deleteSnapshot(controller, DeleteSnapshotRequest.newBuilder()
.setSnapshot(snapshot).build());
.setSnapshot(createHBaseProtosSnapshotDesc(snapshot)).build());
return null;
}
});
@ -3002,7 +3040,8 @@ public class HBaseAdmin implements Admin {
@Override
public CompactionState getCompactionState(TableName tableName,
CompactType compactType) throws IOException {
CompactionState state = CompactionState.NONE;
AdminProtos.GetRegionInfoResponse.CompactionState state =
AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
checkTableExists(tableName);
PayloadCarryingRpcController controller = rpcControllerFactory.newController();
switch (compactType) {
@ -3044,16 +3083,16 @@ public class HBaseAdmin implements Admin {
case MAJOR_AND_MINOR:
return CompactionState.MAJOR_AND_MINOR;
case MAJOR:
if (state == CompactionState.MINOR) {
if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MINOR) {
return CompactionState.MAJOR_AND_MINOR;
}
state = CompactionState.MAJOR;
state = AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR;
break;
case MINOR:
if (state == CompactionState.MAJOR) {
if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) {
return CompactionState.MAJOR_AND_MINOR;
}
state = CompactionState.MINOR;
state = AdminProtos.GetRegionInfoResponse.CompactionState.MINOR;
break;
case NONE:
default: // nothing, continue
@ -3084,7 +3123,10 @@ public class HBaseAdmin implements Admin {
}
break;
}
return state;
if(state != null) {
return ProtobufUtil.createCompactionState(state);
}
return null;
}
/**

View File

@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Represents the master switch type
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum MasterSwitchType {
SPLIT,
MERGE
}
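
As with the other enums, callers now reference this type directly rather than as Admin.MasterSwitchType. A minimal sketch of turning the split and merge switches off follows; the Admin handle is an assumption, and the setSplitOrMergeEnabled argument order is taken from the HBaseFsck hunk later in this diff.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;

public final class SplitMergeSwitchExample {
  private SplitMergeSwitchExample() {}

  /** Turn off both the split and the merge switch. Sketch only. */
  public static void disableSplitAndMerge(Admin admin) throws IOException {
    // Arguments: enabled, synchronous, skipLock, then the switches to change.
    admin.setSplitOrMergeEnabled(false, false, false,
        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
  }
}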

View File

@ -0,0 +1,84 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* The POJO equivalent of HBaseProtos.SnapshotDescription
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class SnapshotDescription {
private String name;
private String table;
private SnapshotType snapShotType = SnapshotType.DISABLED;
private String owner;
private long creationTime = -1L;
private int version = -1;
public SnapshotDescription(String name) {
this(name, null);
}
public SnapshotDescription(String name, String table) {
this(name, table, SnapshotType.DISABLED, null);
}
public SnapshotDescription(String name, String table, SnapshotType type) {
this(name, table, type, null);
}
public SnapshotDescription(String name, String table, SnapshotType type, String owner) {
this(name, table, type, owner, -1, -1);
}
public SnapshotDescription(String name, String table, SnapshotType type, String owner,
long creationTime, int version) {
this.name = name;
this.table = table;
this.snapShotType = type;
this.owner = owner;
this.creationTime = creationTime;
this.version = version;
}
public String getName() {
return this.name;
}
public String getTable() {
return this.table;
}
public SnapshotType getType() {
return this.snapShotType;
}
public String getOwner() {
return this.owner;
}
public long getCreationTime() {
return this.creationTime;
}
public int getVersion() {
return this.version;
}
}
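
To show the POJO in use, here is a sketch of taking an asynchronous snapshot and polling for completion with the reworked Admin methods; the snapshot name, polling cadence, and attempt limit are assumptions. Since takeSnapshotAsync now returns void rather than a MasterProtos.SnapshotResponse, the caller polls isSnapshotFinished instead of relying on the server-supplied timeout.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class AsyncSnapshotExample {
  private AsyncSnapshotExample() {}

  /** Take a snapshot asynchronously and wait (bounded) for it to finish. Sketch only. */
  public static void snapshotAndWait(Admin admin, TableName table) throws Exception {
    SnapshotDescription snapshot =
        new SnapshotDescription("example_snapshot", table.getNameAsString(), SnapshotType.FLUSH);
    admin.takeSnapshotAsync(snapshot);
    // Poll for completion; the 30-attempt / 1s cadence is an arbitrary choice.
    for (int i = 0; i < 30 && !admin.isSnapshotFinished(snapshot); i++) {
      Thread.sleep(1000);
    }
  }
}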

View File

@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* POJO representing the snapshot type
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum SnapshotType {
DISABLED, FLUSH, SKIPFLUSH;
}

View File

@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@ -67,6 +68,8 @@ import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -3397,4 +3400,76 @@ public final class ProtobufUtil {
}
return htd;
}
/**
* Creates {@link CompactionState} from
* {@link org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState}
* state
* @param state the protobuf CompactionState
* @return CompactionState
*/
public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) {
return CompactionState.valueOf(state.toString());
}
/**
* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
* from {@link SnapshotType}
* @param type the client-side SnapshotType
* @return the protobuf SnapshotDescription type
*/
public static HBaseProtos.SnapshotDescription.Type
createProtosSnapShotDescType(SnapshotType type) {
return HBaseProtos.SnapshotDescription.Type.valueOf(type.name());
}
/**
* Creates {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type}
* from a snapshot description type given as a string
* @param snapshotDesc string representing the snapshot description type
* @return the protobuf SnapshotDescription type
*/
public static HBaseProtos.SnapshotDescription.Type
createProtosSnapShotDescType(String snapshotDesc) {
return HBaseProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase());
}
/**
* Creates {@link SnapshotType} from the type of
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
* @param type the snapshot description type
* @return the client-side SnapshotType
*/
public static SnapshotType createSnapshotType(HBaseProtos.SnapshotDescription.Type type) {
return SnapshotType.valueOf(type.toString());
}
/**
* Convert from {@link SnapshotDescription} to
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription}
* @param snapshotDesc the POJO SnapshotDescription
* @return the protobuf SnapshotDescription
*/
public static HBaseProtos.SnapshotDescription
createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) {
HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
if (snapshotDesc.getTable() != null) {
builder.setTable(snapshotDesc.getTable());
}
if (snapshotDesc.getName() != null) {
builder.setName(snapshotDesc.getName());
}
if (snapshotDesc.getOwner() != null) {
builder.setOwner(snapshotDesc.getOwner());
}
if (snapshotDesc.getCreationTime() != -1L) {
builder.setCreationTime(snapshotDesc.getCreationTime());
}
if (snapshotDesc.getVersion() != -1) {
builder.setVersion(snapshotDesc.getVersion());
}
builder.setType(ProtobufUtil.createProtosSnapShotDescType(snapshotDesc.getType()));
HBaseProtos.SnapshotDescription snapshot = builder.build();
return snapshot;
}
}
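
The new helpers above give internal code a single place to convert between the client POJOs and their protobuf counterparts. A minimal round-trip sketch, assuming only the methods added in this hunk (the snapshot and table names are illustrative):

import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public final class SnapshotConversionExample {
  private SnapshotConversionExample() {}

  public static void main(String[] args) {
    SnapshotDescription pojo =
        new SnapshotDescription("example_snapshot", "example_table", SnapshotType.SKIPFLUSH);
    // POJO -> protobuf, e.g. before building a SnapshotRequest.
    HBaseProtos.SnapshotDescription proto = ProtobufUtil.createHBaseProtosSnapshotDesc(pojo);
    // protobuf type -> POJO type, e.g. when materializing listSnapshots() results.
    SnapshotType roundTripped = ProtobufUtil.createSnapshotType(proto.getType());
    System.out.println(proto.getName() + " " + roundTripped);
  }
}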

View File

@ -31,12 +31,12 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Action;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
@ -1701,11 +1701,11 @@ public final class RequestConverter {
/**
* Creates a protocol buffer IsSplitOrMergeEnabledRequest
*
* @param switchType see {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType}
* @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
* @return a IsSplitOrMergeEnabledRequest
*/
public static IsSplitOrMergeEnabledRequest buildIsSplitOrMergeEnabledRequest(
Admin.MasterSwitchType switchType) {
MasterSwitchType switchType) {
IsSplitOrMergeEnabledRequest.Builder builder = IsSplitOrMergeEnabledRequest.newBuilder();
builder.setSwitchType(convert(switchType));
return builder.build();
@ -1723,23 +1723,23 @@ public final class RequestConverter {
*
* @param enabled switch is enabled or not
* @param synchronous set switch sync?
* @param switchTypes see {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType}, it is
* @param switchTypes see {@link org.apache.hadoop.hbase.client.MasterSwitchType}, it is
* a list.
* @return a SetSplitOrMergeEnabledRequest
*/
public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled,
boolean synchronous, boolean skipLock, Admin.MasterSwitchType... switchTypes) {
boolean synchronous, boolean skipLock, MasterSwitchType... switchTypes) {
SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder();
builder.setEnabled(enabled);
builder.setSynchronous(synchronous);
builder.setSkipLock(skipLock);
for (Admin.MasterSwitchType switchType : switchTypes) {
for (MasterSwitchType switchType : switchTypes) {
builder.addSwitchTypes(convert(switchType));
}
return builder.build();
}
private static MasterProtos.MasterSwitchType convert(Admin.MasterSwitchType switchType) {
private static MasterProtos.MasterSwitchType convert(MasterSwitchType switchType) {
switch (switchType) {
case SPLIT:
return MasterProtos.MasterSwitchType.SPLIT;

View File

@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
@ -141,18 +140,17 @@ public class TestSnapshotFromAdmin {
Mockito.when(mockConnection.getRpcRetryingCallerFactory()).thenReturn(callerFactory);
Mockito.when(mockConnection.getRpcControllerFactory()).thenReturn(controllerFactory);
Admin admin = new HBaseAdmin(mockConnection);
SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
// check that invalid snapshot names fail
failSnapshotStart(admin, builder.setName(HConstants.SNAPSHOT_DIR_NAME).build());
failSnapshotStart(admin, builder.setName("-snapshot").build());
failSnapshotStart(admin, builder.setName("snapshot fails").build());
failSnapshotStart(admin, builder.setName("snap$hot").build());
failSnapshotStart(admin, builder.setName("snap:hot").build());
failSnapshotStart(admin, new SnapshotDescription(HConstants.SNAPSHOT_DIR_NAME));
failSnapshotStart(admin, new SnapshotDescription("-snapshot"));
failSnapshotStart(admin, new SnapshotDescription("snapshot fails"));
failSnapshotStart(admin, new SnapshotDescription("snap$hot"));
failSnapshotStart(admin, new SnapshotDescription("snap:hot"));
// check the table name also get verified
failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build());
failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build());
failSnapshotStart(admin, builder.setName("snapshot").setTable("table fails").build());
failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build());
failSnapshotStart(admin, new SnapshotDescription("snapshot", ".table"));
failSnapshotStart(admin, new SnapshotDescription("snapshot", "-table"));
failSnapshotStart(admin, new SnapshotDescription("snapshot", "table fails"));
failSnapshotStart(admin, new SnapshotDescription("snapshot", "tab%le"));
// mock the master connection
MasterKeepAliveConnection master = Mockito.mock(MasterKeepAliveConnection.class);
@ -167,10 +165,11 @@ public class TestSnapshotFromAdmin {
Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(doneResponse);
// make sure that we can use valid names
admin.snapshot(builder.setName("snapshot").setTable("table").build());
admin.snapshot(new SnapshotDescription("snapshot", "table"));
}
private void failSnapshotStart(Admin admin, SnapshotDescription snapshot) throws IOException {
private void failSnapshotStart(Admin admin, SnapshotDescription snapshot)
throws IOException {
try {
admin.snapshot(snapshot);
fail("Snapshot should not have succeed with name:" + snapshot.getName());

View File

@ -22,6 +22,7 @@ import org.apache.commons.lang.math.RandomUtils;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
/**
* Action that queues a table compaction.
@ -56,9 +57,9 @@ public class CompactMobAction extends Action {
LOG.info("Performing action: Compact mob of table " + tableName + ", major=" + major);
try {
if (major) {
admin.majorCompact(tableName, Admin.CompactType.MOB);
admin.majorCompact(tableName, CompactType.MOB);
} else {
admin.compact(tableName, Admin.CompactType.MOB);
admin.compact(tableName, CompactType.MOB);
}
} catch (Exception ex) {
LOG.warn("Mob Compaction failed, might be caused by other chaos: " + ex.getMessage());

View File

@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@ -827,13 +827,13 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
@Override
public boolean preSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
return false;
}
@Override
public void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
}
@Override

View File

@ -39,13 +39,13 @@ org.apache.hadoop.hbase.ServerLoad;
org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.TableName;
org.apache.hadoop.hbase.client.Admin;
org.apache.hadoop.hbase.client.SnapshotDescription;
org.apache.hadoop.hbase.master.AssignmentManager;
org.apache.hadoop.hbase.master.DeadServer;
org.apache.hadoop.hbase.master.HMaster;
org.apache.hadoop.hbase.master.RegionState;
org.apache.hadoop.hbase.master.ServerManager;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
org.apache.hadoop.hbase.quotas.QuotaUtil;
org.apache.hadoop.hbase.security.access.AccessControlLists;
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@ -450,7 +450,7 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
@Override
public boolean preSetSplitOrMergeEnabled(ObserverContext<MasterCoprocessorEnvironment> ctx,
boolean newValue,
Admin.MasterSwitchType switchType)
MasterSwitchType switchType)
throws IOException {
return false;
}
@ -458,7 +458,7 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
@Override
public void postSetSplitOrMergeEnabled(ObserverContext<MasterCoprocessorEnvironment> ctx,
boolean newValue,
Admin.MasterSwitchType switchType)
MasterSwitchType switchType)
throws IOException {
}

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@ -442,13 +442,13 @@ public class BaseMasterObserver implements MasterObserver {
@Override
public boolean preSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
return false;
}
@Override
public void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
}
@Override

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@ -807,7 +807,7 @@ public interface MasterObserver extends Coprocessor {
* @param switchType type of switch
*/
boolean preSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException;
final boolean newValue, final MasterSwitchType switchType) throws IOException;
/**
* Called after setting split / merge switch
@ -816,7 +816,7 @@ public interface MasterObserver extends Coprocessor {
* @param switchType type of switch
*/
void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException;
final boolean newValue, final MasterSwitchType switchType) throws IOException;
/**
* Called prior to modifying the flag used to enable/disable region balancing.

View File

@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.RegionStateListener;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
@ -2364,7 +2364,7 @@ public class AssignmentManager {
}
if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled(
Admin.MasterSwitchType.SPLIT)) {
MasterSwitchType.SPLIT)) {
return "split switch is off!";
}
@ -2527,7 +2527,7 @@ public class AssignmentManager {
}
if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled(
Admin.MasterSwitchType.MERGE)) {
MasterSwitchType.MERGE)) {
return "merge switch is off!";
}
// Just return in case of retrying

View File

@ -83,7 +83,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
@ -2821,10 +2821,10 @@ public class HMaster extends HRegionServer implements MasterServices {
/**
* Queries the state of the {@link SplitOrMergeTracker}. If it is not initialized,
* false is returned. If switchType is illegal, false will return.
* @param switchType see {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType}
* @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType}
* @return The state of the switch
*/
public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) {
public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
if (null == splitOrMergeTracker) {
return false;
}

View File

@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@ -779,7 +779,7 @@ public class MasterCoprocessorHost
}
public boolean preSetSplitOrMergeEnabled(final boolean newValue,
final Admin.MasterSwitchType switchType) throws IOException {
final MasterSwitchType switchType) throws IOException {
return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
@ -790,7 +790,7 @@ public class MasterCoprocessorHost
}
public void postSetSplitOrMergeEnabled(final boolean newValue,
final Admin.MasterSwitchType switchType) throws IOException {
final MasterSwitchType switchType) throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)

View File

@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -1500,8 +1500,8 @@ public class MasterRpcServices extends RSRpcServices
if (!master.getSplitOrMergeTracker().lock(skipLock)) {
throw new DoNotRetryIOException("can't set splitOrMerge switch due to lock");
}
for (MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
Admin.MasterSwitchType switchType = convert(masterSwitchType);
for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
MasterSwitchType switchType = convert(masterSwitchType);
boolean oldValue = master.isSplitOrMergeEnabled(switchType);
response.addPrevValue(oldValue);
boolean bypass = false;
@ -1619,12 +1619,12 @@ public class MasterRpcServices extends RSRpcServices
return response.build();
}
private Admin.MasterSwitchType convert(MasterSwitchType switchType) {
private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) {
switch (switchType) {
case SPLIT:
return Admin.MasterSwitchType.SPLIT;
return MasterSwitchType.SPLIT;
case MERGE:
return Admin.MasterSwitchType.MERGE;
return MasterSwitchType.MERGE;
default:
break;
}

View File

@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin.MasterSwitchType;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.master.MasterRpcServices;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;

View File

@ -60,12 +60,12 @@ import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagRewriteCell;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Query;
@ -1262,14 +1262,14 @@ public class AccessController extends BaseMasterAndRegionObserver
@Override
public boolean preSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
requirePermission("setSplitOrMergeEnabled", Action.ADMIN);
return false;
}
@Override
public void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
}
@Override

View File

@ -51,11 +51,11 @@ import org.apache.hadoop.hbase.TagRewriteCell;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -310,13 +310,13 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
@Override
public boolean preSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
return false;
}
@Override
public void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
}
@Override

View File

@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import java.util.Arrays;
@ -66,10 +68,10 @@ public class CreateSnapshot extends AbstractHBaseTool {
admin = connection.getAdmin();
HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
if (snapshotType != null) {
type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotName.toUpperCase());
type = ProtobufUtil.createProtosSnapShotDescType(snapshotName);
}
admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
admin.snapshot(new SnapshotDescription(snapshotName, tableName,
ProtobufUtil.createSnapshotType(type)));
} catch (Exception e) {
return -1;
} finally {

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
@ -47,7 +48,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.WALLink;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.util.FSUtils;
@ -126,14 +128,15 @@ public final class SnapshotInfo extends Configured implements Tool {
private AtomicLong hfilesMobSize = new AtomicLong();
private AtomicLong logSize = new AtomicLong();
private final SnapshotDescription snapshot;
private final HBaseProtos.SnapshotDescription snapshot;
private final TableName snapshotTable;
private final Configuration conf;
private final FileSystem fs;
SnapshotStats(final Configuration conf, final FileSystem fs, final SnapshotDescription snapshot)
SnapshotStats(final Configuration conf, final FileSystem fs,
final SnapshotDescription snapshot)
{
this.snapshot = snapshot;
this.snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot);
this.snapshotTable = TableName.valueOf(snapshot.getTable());
this.conf = conf;
this.fs = fs;
@ -141,7 +144,9 @@ public final class SnapshotInfo extends Configured implements Tool {
/** @return the snapshot descriptor */
public SnapshotDescription getSnapshotDescription() {
return this.snapshot;
return new SnapshotDescription(this.snapshot.getName(), this.snapshot.getTable(),
ProtobufUtil.createSnapshotType(this.snapshot.getType()), this.snapshot.getOwner(),
this.snapshot.getCreationTime(), this.snapshot.getVersion());
}
/** @return true if the snapshot is corrupted */
@ -371,7 +376,8 @@ public final class SnapshotInfo extends Configured implements Tool {
return false;
}
SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc);
return true;
}
@ -380,7 +386,7 @@ public final class SnapshotInfo extends Configured implements Tool {
* Dump the {@link SnapshotDescription}
*/
private void printInfo() {
SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
HBaseProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
System.out.println("Snapshot Info");
System.out.println("----------------------------------------");
@ -413,9 +419,12 @@ public final class SnapshotInfo extends Configured implements Tool {
}
// Collect information about hfiles and logs in the snapshot
final SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
final HBaseProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
final String table = snapshotDesc.getTable();
final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, snapshotDesc);
SnapshotDescription desc = new SnapshotDescription(snapshotDesc.getName(),
snapshotDesc.getTable(), ProtobufUtil.createSnapshotType(snapshotDesc.getType()),
snapshotDesc.getOwner(), snapshotDesc.getCreationTime(), snapshotDesc.getVersion());
final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc);
SnapshotReferenceUtil.concurrentVisitReferencedFiles(getConf(), fs, snapshotManifest,
new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
@ -492,10 +501,11 @@ public final class SnapshotInfo extends Configured implements Tool {
*/
public static SnapshotStats getSnapshotStats(final Configuration conf,
final SnapshotDescription snapshot) throws IOException {
HBaseProtos.SnapshotDescription snapshotDesc = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot);
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
final SnapshotStats stats = new SnapshotStats(conf, fs, snapshot);
SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest,
new SnapshotReferenceUtil.SnapshotVisitor() {
@ -525,7 +535,11 @@ public final class SnapshotInfo extends Configured implements Tool {
List<SnapshotDescription> snapshotLists =
new ArrayList<SnapshotDescription>(snapshots.length);
for (FileStatus snapshotDirStat: snapshots) {
snapshotLists.add(SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()));
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath());
snapshotLists.add(new SnapshotDescription(snapshotDesc.getName(),
snapshotDesc.getTable(), ProtobufUtil.createSnapshotType(snapshotDesc.getType()),
snapshotDesc.getOwner(), snapshotDesc.getCreationTime(), snapshotDesc.getVersion()));
}
return snapshotLists;
}

View File

@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
@ -691,7 +692,7 @@ public class HBaseFsck extends Configured implements Closeable {
if (shouldDisableSplitAndMerge()) {
admin.releaseSplitOrMergeLockAndRollback();
oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
}
try {

View File

@ -24,7 +24,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@ -80,7 +80,7 @@ public class SplitOrMergeTracker {
mergeStateTracker.start();
}
public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) {
public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
switch (switchType) {
case SPLIT:
return splitStateTracker.isSwitchEnabled();
@ -92,7 +92,7 @@ public class SplitOrMergeTracker {
return false;
}
public void setSplitOrMergeEnabled(boolean enabled, Admin.MasterSwitchType switchType)
public void setSplitOrMergeEnabled(boolean enabled, MasterSwitchType switchType)
throws KeeperException {
switch (switchType) {
case SPLIT:
@ -164,8 +164,8 @@ public class SplitOrMergeTracker {
}
private void saveOriginalState() throws KeeperException {
boolean splitEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
boolean mergeEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
boolean splitEnabled = isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
boolean mergeEnabled = isSplitOrMergeEnabled(MasterSwitchType.MERGE);
String splitOrMergeStates = ZKUtil.joinZNode(watcher.getSwitchLockZNode(),
SplitOrMergeTracker.STATE);
ZooKeeperProtos.SplitAndMergeState.Builder builder

View File

@ -21,9 +21,9 @@
import="java.util.Date"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hbase.client.Admin"
import="org.apache.hadoop.hbase.client.SnapshotDescription"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"
import="org.apache.hadoop.util.StringUtils"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.HBaseConfiguration" %>

View File

@ -28,6 +28,7 @@
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hbase.client.HTable"
import="org.apache.hadoop.hbase.client.Admin"
import="org.apache.hadoop.hbase.client.CompactionState"
import="org.apache.hadoop.hbase.client.RegionLocator"
import="org.apache.hadoop.hbase.HRegionInfo"
import="org.apache.hadoop.hbase.HRegionLocation"
@ -39,7 +40,6 @@
import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
import="org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.HColumnDescriptor"
import="org.apache.hadoop.hbase.client.RegionReplicaUtil"

View File

@ -85,7 +85,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
@ -278,7 +277,7 @@ public class TestFromClientSide {
@Override
public boolean evaluate() throws IOException {
return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) ==
AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
CompactionState.NONE;
}
});
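
Editor's note: the test now compares against the public CompactionState enum instead of the protobuf GetRegionInfoResponse type. A minimal sketch of the same wait written without the Waiter helper, assuming an open Admin admin and a TableName t (exception handling omitted):

  // Poll until compaction on the table has finished, using only public client types.
  while (admin.getCompactionState(t) != CompactionState.NONE) {
    Thread.sleep(100);
  }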

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
@ -212,18 +212,13 @@ public class TestSnapshotFromClient {
final String SNAPSHOT_NAME = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
SnapshotDescription desc = SnapshotDescription.newBuilder()
.setType(SnapshotDescription.Type.DISABLED)
.setTable(STRING_TABLE_NAME)
.setName(SNAPSHOT_NAME)
.setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
.build();
admin.snapshot(desc);
admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, STRING_TABLE_NAME,
SnapshotType.DISABLED, null, -1, SnapshotManifestV1.DESCRIPTOR_VERSION));
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
List<SnapshotDescription> snapshots =
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
// make sure its a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
@ -231,9 +226,9 @@ public class TestSnapshotFromClient {
LOG.debug("FS state after snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
admin, fs);
SnapshotTestingUtils.confirmSnapshotValid(
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM,
rootDir, admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
@ -292,8 +287,8 @@ public class TestSnapshotFromClient {
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
List<SnapshotDescription> snapshots =
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
// make sure its a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
@ -304,8 +299,9 @@ public class TestSnapshotFromClient {
List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
List<byte[]> nonEmptyCfs = Lists.newArrayList();
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs,
rootDir, admin, fs);
SnapshotTestingUtils.confirmSnapshotValid(
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, nonEmptyCfs,
emptyCfs, rootDir, admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
@ -375,7 +371,8 @@ public class TestSnapshotFromClient {
admin.snapshot(Bytes.toBytes(table2Snapshot1), TABLE_NAME);
LOG.debug(table2Snapshot1 + " completed.");
List<SnapshotDescription> listTableSnapshots = admin.listTableSnapshots("test.*", "Table1.*");
List<SnapshotDescription> listTableSnapshots =
admin.listTableSnapshots("test.*", "Table1.*");
List<String> listTableSnapshotNames = new ArrayList<String>();
assertEquals(2, listTableSnapshots.size());
for (SnapshotDescription s : listTableSnapshots) {

View File

@ -78,14 +78,14 @@ public class TestSplitOrMergeStatus {
Admin admin = TEST_UTIL.getAdmin();
initSwitchStatus(admin);
boolean[] results = admin.setSplitOrMergeEnabled(false, false,
true, Admin.MasterSwitchType.SPLIT);
true, MasterSwitchType.SPLIT);
assertEquals(results.length, 1);
assertTrue(results[0]);
admin.split(t.getName());
int count = waitOnSplitOrMerge(t).size();
assertTrue(orignalCount == count);
results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT);
results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT);
assertEquals(results.length, 1);
assertFalse(results[0]);
admin.split(t.getName());
@ -111,7 +111,7 @@ public class TestSplitOrMergeStatus {
waitForMergable(admin, name);
int orignalCount = locator.getAllRegionLocations().size();
boolean[] results = admin.setSplitOrMergeEnabled(false, false,
true, Admin.MasterSwitchType.MERGE);
true, MasterSwitchType.MERGE);
assertEquals(results.length, 1);
assertTrue(results[0]);
List<HRegionInfo> regions = admin.getTableRegions(t.getName());
@ -122,7 +122,7 @@ public class TestSplitOrMergeStatus {
assertTrue(orignalCount == count);
waitForMergable(admin, name);
results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE);
results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE);
assertEquals(results.length, 1);
assertFalse(results[0]);
admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
@ -136,12 +136,12 @@ public class TestSplitOrMergeStatus {
public void testMultiSwitches() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
boolean[] switches = admin.setSplitOrMergeEnabled(false, false, true,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
for (boolean s : switches){
assertTrue(s);
}
assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT));
assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE));
assertFalse(admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT));
assertFalse(admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE));
admin.close();
}
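
Editor's note: the switch calls now take the top-level MasterSwitchType enum instead of the nested Admin.MasterSwitchType. A minimal sketch of the call shape, with the boolean flags passed exactly as in these tests; judging by the assertions above, the returned array appears to hold the previous state of each switch passed in:

  boolean[] previous = admin.setSplitOrMergeEnabled(false, false, true,
      MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
  // previous[0]/previous[1]: prior values of the SPLIT and MERGE switches.
  if (!admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
    // splits are now disabled cluster-wide
  }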
@ -149,10 +149,10 @@ public class TestSplitOrMergeStatus {
public void testSwitchLock() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
admin.setSplitOrMergeEnabled(false, false, false,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
try {
admin.setSplitOrMergeEnabled(false, false, true,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
fail();
} catch (IOException e) {
LOG.info("", e);
@ -160,7 +160,7 @@ public class TestSplitOrMergeStatus {
admin.releaseSplitOrMergeLockAndRollback();
try {
admin.setSplitOrMergeEnabled(true, false, true,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
} catch (IOException e) {
fail();
}
@ -168,14 +168,14 @@ public class TestSplitOrMergeStatus {
}
private void initSwitchStatus(Admin admin) throws IOException {
if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)) {
admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT);
if (!admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT);
}
if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)) {
admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE);
if (!admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE);
}
assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT));
assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE));
assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT));
assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE));
}
private void waitForMergable(Admin admin, TableName t) throws InterruptedException, IOException {

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.AssignmentManager;
@ -348,13 +349,13 @@ public class TestMasterObserver {
@Override
public boolean preSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
return false;
}
@Override
public void postSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException {
final boolean newValue, final MasterSwitchType switchType) throws IOException {
}
@Override

View File

@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@ -87,7 +88,6 @@ import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
@ -1421,7 +1421,8 @@ public class TestDistributedLogSplitting {
TEST_UTIL.waitFor(30000, 200, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return (TEST_UTIL.getHBaseAdmin().getCompactionState(tableName) == CompactionState.NONE);
return (TEST_UTIL.getHBaseAdmin()
.getCompactionState(tableName) == CompactionState.NONE);
}
});

View File

@ -31,7 +31,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
@ -106,7 +107,7 @@ public class TestWarmupRegion {
@Override
public boolean evaluate() throws IOException {
return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) ==
AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
CompactionState.NONE;
}
});

View File

@ -33,7 +33,9 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -59,7 +61,7 @@ public class TestCloneSnapshotProcedure {
private static long nonceGroup = HConstants.NO_NONCE;
private static long nonce = HConstants.NO_NONCE;
private static SnapshotDescription snapshot = null;
private static HBaseProtos.SnapshotDescription snapshot = null;
private static void setupConf(Configuration conf) {
conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
@ -99,7 +101,7 @@ public class TestCloneSnapshotProcedure {
assertTrue("expected executor to be running", procExec.isRunning());
}
private SnapshotDescription getSnapshot() throws Exception {
private HBaseProtos.SnapshotDescription getSnapshot() throws Exception {
if (snapshot == null) {
final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot");
long tid = System.currentTimeMillis();
@ -116,7 +118,7 @@ public class TestCloneSnapshotProcedure {
admin.enableTable(snapshotTableName);
List<SnapshotDescription> snapshotList = admin.listSnapshots();
snapshot = snapshotList.get(0);
snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0));
}
return snapshot;
}
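
Editor's note: where procedure internals still need the wire form, the tests bridge through ProtobufUtil. A minimal sketch of both directions, assuming an open Admin with at least one snapshot listed:

  List<SnapshotDescription> snapshots = admin.listSnapshots();
  // Client POJO -> protobuf, as used by getSnapshot() above.
  HBaseProtos.SnapshotDescription pb =
      ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0));
  // Protobuf type -> client enum, the reverse direction.
  SnapshotType type = ProtobufUtil.createSnapshotType(pb.getType());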

View File

@ -35,7 +35,9 @@ import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -73,7 +75,7 @@ public class TestRestoreSnapshotProcedure {
private static long nonceGroup = HConstants.NO_NONCE;
private static long nonce = HConstants.NO_NONCE;
private SnapshotDescription snapshot = null;
private HBaseProtos.SnapshotDescription snapshot = null;
private HTableDescriptor snapshotHTD = null;
private static void setupConf(Configuration conf) {
@ -141,7 +143,7 @@ public class TestRestoreSnapshotProcedure {
admin.snapshot(snapshotName, snapshotTableName);
List<SnapshotDescription> snapshotList = admin.listSnapshots();
snapshot = snapshotList.get(0);
snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0));
// modify the table
HColumnDescriptor columnFamilyDescriptor3 = new HColumnDescriptor(CF3);

View File

@ -51,6 +51,8 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
@ -68,7 +70,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
@ -356,7 +357,7 @@ public class TestMobCompactor {
countFiles(tableName, false, family2));
// do the major mob compaction, it will force all files to compaction
admin.majorCompact(tableName, hcd1.getName(), Admin.CompactType.MOB);
admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB);
waitUntilMobCompactionFinished(tableName);
assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum),
@ -399,7 +400,7 @@ public class TestMobCompactor {
Cell cell = result.getColumnLatestCell(hcd1.getName(), Bytes.toBytes(qf1));
assertEquals("Before compaction: mob value of k0", newValue0,
Bytes.toString(CellUtil.cloneValue(cell)));
admin.majorCompact(tableName, hcd1.getName(), Admin.CompactType.MOB);
admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB);
waitUntilMobCompactionFinished(tableName);
// read the latest cell of key0, the cell seqId in bulk loaded file is not reset in the
// scanner. The cell that has "new" value is still visible.
@ -449,7 +450,7 @@ public class TestMobCompactor {
loadData(admin, bufMut, tableName, new Put[] { put1 }); // now two mob files
admin.majorCompact(tableName);
waitUntilCompactionFinished(tableName);
admin.majorCompact(tableName, hcd1.getName(), Admin.CompactType.MOB);
admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB);
waitUntilMobCompactionFinished(tableName);
// read the latest cell of key1.
Get get = new Get(key1);
@ -475,12 +476,12 @@ public class TestMobCompactor {
private void waitUntilMobCompactionFinished(TableName tableName) throws IOException,
InterruptedException {
long finished = EnvironmentEdgeManager.currentTime() + 60000;
CompactionState state = admin.getCompactionState(tableName, Admin.CompactType.MOB);
CompactionState state = admin.getCompactionState(tableName, CompactType.MOB);
while (EnvironmentEdgeManager.currentTime() < finished) {
if (state == CompactionState.NONE) {
break;
}
state = admin.getCompactionState(tableName, Admin.CompactType.MOB);
state = admin.getCompactionState(tableName, CompactType.MOB);
Thread.sleep(10);
}
assertEquals(CompactionState.NONE, state);
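
Editor's note: MOB compaction now goes through the top-level CompactType and CompactionState enums. A minimal sketch combining the two calls used above, assuming an Admin admin, a TableName table and a byte[] family (exception handling omitted):

  // Trigger a MOB major compaction and wait for it with the public enums only.
  admin.majorCompact(table, family, CompactType.MOB);
  while (admin.getCompactionState(table, CompactType.MOB) != CompactionState.NONE) {
    Thread.sleep(10);
  }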

View File

@ -31,9 +31,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -164,7 +164,7 @@ public class TestCompactionState {
// otherwise, the compaction should have already been done
if (expectedState != state) {
for (Region region: regions) {
state = region.getCompactionState();
state = CompactionState.valueOf(region.getCompactionState().toString());
assertEquals(CompactionState.NONE, state);
}
} else {

View File

@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
@ -53,7 +54,6 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;

View File

@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@ -710,7 +711,7 @@ public class TestAccessController extends SecureTestUtil {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preSetSplitOrMergeEnabled(ObserverContext.createAndPrepare(CP_ENV, null),
true, Admin.MasterSwitchType.MERGE);
true, MasterSwitchType.MERGE);
return null;
}
};

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
@ -59,7 +60,8 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
@ -120,7 +122,7 @@ public class SnapshotTestingUtils {
* Make sure that there is only one snapshot returned from the master
*/
public static void assertOneSnapshotThatMatches(Admin admin,
SnapshotDescription snapshot) throws IOException {
HBaseProtos.SnapshotDescription snapshot) throws IOException {
assertOneSnapshotThatMatches(admin, snapshot.getName(),
TableName.valueOf(snapshot.getTable()));
}
@ -153,7 +155,7 @@ public class SnapshotTestingUtils {
}
public static void confirmSnapshotValid(HBaseTestingUtility testUtil,
SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family)
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family)
throws IOException {
MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem();
confirmSnapshotValid(snapshotDescriptor, tableName, family,
@ -165,7 +167,7 @@ public class SnapshotTestingUtils {
* be in the snapshot.
*/
public static void confirmSnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
throws IOException {
ArrayList nonEmptyTestFamilies = new ArrayList(1);
@ -178,7 +180,7 @@ public class SnapshotTestingUtils {
* Confirm that the snapshot has no references files but only metadata.
*/
public static void confirmEmptySnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
throws IOException {
ArrayList emptyTestFamilies = new ArrayList(1);
@ -194,7 +196,7 @@ public class SnapshotTestingUtils {
* by the MasterSnapshotVerifier, at the end of the snapshot operation.
*/
public static void confirmSnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
List<byte[]> nonEmptyTestFamilies, List<byte[]> emptyTestFamilies,
Path rootDir, Admin admin, FileSystem fs) throws IOException {
final Configuration conf = admin.getConfiguration();
@ -204,7 +206,7 @@ public class SnapshotTestingUtils {
snapshotDescriptor, rootDir);
assertTrue(fs.exists(snapshotDir));
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
HBaseProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
// Extract regions and families with store files
final Set<byte[]> snapshotFamilies = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
@ -265,7 +267,7 @@ public class SnapshotTestingUtils {
* @throws ServiceException if the snapshot fails
*/
public static void waitForSnapshotToComplete(HMaster master,
SnapshotDescription snapshot, long sleep) throws ServiceException {
HBaseProtos.SnapshotDescription snapshot, long sleep) throws ServiceException {
final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
.setSnapshot(snapshot).build();
IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()
@ -286,12 +288,13 @@ public class SnapshotTestingUtils {
*/
public static void snapshot(Admin admin,
final String snapshotName, final String tableName,
SnapshotDescription.Type type, int numTries) throws IOException {
HBaseProtos.SnapshotDescription.Type type, int numTries) throws IOException {
int tries = 0;
CorruptedSnapshotException lastEx = null;
while (tries++ < numTries) {
try {
admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
admin.snapshot(new SnapshotDescription(snapshotName, tableName,
SnapshotType.valueOf(type.toString())));
return;
} catch (CorruptedSnapshotException cse) {
LOG.warn("Got CorruptedSnapshotException", cse);
@ -393,13 +396,14 @@ public class SnapshotTestingUtils {
}
admin.snapshot(snapshotNameString, tableName);
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertExistsMatchingSnapshot(admin,
snapshotNameString, tableName);
List<SnapshotDescription> snapshots =
SnapshotTestingUtils.assertExistsMatchingSnapshot(admin, snapshotNameString, tableName);
if (snapshots == null || snapshots.size() != 1) {
Assert.fail("Incorrect number of snapshots for table " + tableName);
}
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), tableName, nonEmptyFamilyNames,
SnapshotTestingUtils.confirmSnapshotValid(
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), tableName, nonEmptyFamilyNames,
emptyFamilyNames, rootDir, admin, fs);
}
@ -418,7 +422,8 @@ public class SnapshotTestingUtils {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
mfs.getRootDir());
SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
final TableName table = TableName.valueOf(snapshotDesc.getTable());
final ArrayList corruptedFiles = new ArrayList();
@ -467,7 +472,7 @@ public class SnapshotTestingUtils {
public static class SnapshotBuilder {
private final RegionData[] tableRegions;
private final SnapshotDescription desc;
private final HBaseProtos.SnapshotDescription desc;
private final HTableDescriptor htd;
private final Configuration conf;
private final FileSystem fs;
@ -477,7 +482,7 @@ public class SnapshotTestingUtils {
public SnapshotBuilder(final Configuration conf, final FileSystem fs,
final Path rootDir, final HTableDescriptor htd,
final SnapshotDescription desc, final RegionData[] tableRegions)
final HBaseProtos.SnapshotDescription desc, final RegionData[] tableRegions)
throws IOException {
this.fs = fs;
this.conf = conf;
@ -495,7 +500,7 @@ public class SnapshotTestingUtils {
return this.htd;
}
public SnapshotDescription getSnapshotDescription() {
public HBaseProtos.SnapshotDescription getSnapshotDescription() {
return this.desc;
}
@ -519,7 +524,7 @@ public class SnapshotTestingUtils {
.build());
}
private Path[] addRegion(final SnapshotDescription desc) throws IOException {
private Path[] addRegion(final HBaseProtos.SnapshotDescription desc) throws IOException {
if (this.snapshotted == tableRegions.length) {
throw new UnsupportedOperationException("No more regions in the table");
}
@ -648,7 +653,7 @@ public class SnapshotTestingUtils {
HTableDescriptor htd = createHtd(tableName);
RegionData[] regions = createTable(htd, numRegions);
SnapshotDescription desc = SnapshotDescription.newBuilder()
HBaseProtos.SnapshotDescription desc = HBaseProtos.SnapshotDescription.newBuilder()
.setTable(htd.getNameAsString())
.setName(snapshotName)
.setVersion(version)

View File

@ -44,7 +44,9 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@ -147,18 +149,20 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table
String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH);
admin.snapshot(snapshotString, TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
List<SnapshotDescription> snapshots =
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
// make sure its a valid snapshot
LOG.debug("FS state after snapshot:");
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
SnapshotTestingUtils.confirmSnapshotValid(UTIL,
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM);
}
/**
@ -181,18 +185,20 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table
String snapshotString = "skipFlushTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
admin.snapshot(snapshotString, TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.SKIPFLUSH));
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
List<SnapshotDescription> snapshots =
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
// make sure its a valid snapshot
LOG.debug("FS state after snapshot:");
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
SnapshotTestingUtils.confirmSnapshotValid(UTIL,
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
@ -234,7 +240,8 @@ public class TestFlushSnapshotFromClient {
LOG.debug("FS state after snapshot:");
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
SnapshotTestingUtils.confirmSnapshotValid(UTIL,
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM);
}
@Test
@ -258,7 +265,8 @@ public class TestFlushSnapshotFromClient {
// snapshot the non-existant table
try {
admin.snapshot("fail", tableName, SnapshotDescription.Type.FLUSH);
admin.snapshot("fail", tableName,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
fail("Snapshot succeeded even though there is not table.");
} catch (SnapshotCreationException e) {
LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
@ -267,13 +275,14 @@ public class TestFlushSnapshotFromClient {
@Test
public void testAsyncFlushSnapshot() throws Exception {
SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot")
.setTable(TABLE_NAME.getNameAsString())
.setType(SnapshotDescription.Type.FLUSH)
.build();
HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder()
.setName("asyncSnapshot").setTable(TABLE_NAME.getNameAsString())
.setType(HBaseProtos.SnapshotDescription.Type.FLUSH).build();
// take the snapshot async
admin.takeSnapshotAsync(snapshot);
admin.takeSnapshotAsync(
new SnapshotDescription("asyncSnapshot", TABLE_NAME.getNameAsString(),
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)));
// constantly loop, looking for the snapshot to complete
HMaster master = UTIL.getMiniHBaseCluster().getMaster();
@ -295,7 +304,8 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot
String snapshotBeforeMergeName = "snapshotBeforeMerge";
admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
admin.snapshot(snapshotBeforeMergeName, TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
// Clone the table
TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
@ -364,7 +374,7 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot
String snapshotName = "snapshotAfterMerge";
SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
SnapshotDescription.Type.FLUSH, 3);
HBaseProtos.SnapshotDescription.Type.FLUSH, 3);
// Clone the table
TableName cloneName = TableName.valueOf("cloneMerge");
@ -425,14 +435,16 @@ public class TestFlushSnapshotFromClient {
@Override
public void run() {
try {
LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils
.toString(ProtobufUtil.createHBaseProtosSnapshotDesc(ss)));
admin.takeSnapshotAsync(ss);
} catch (Exception e) {
LOG.info("Exception during snapshot request: " + ClientSnapshotDescriptionUtils.toString(
ss)
ProtobufUtil.createHBaseProtosSnapshotDesc(ss))
+ ". This is ok, we expect some", e);
}
LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils
.toString(ProtobufUtil.createHBaseProtosSnapshotDesc(ss)));
toBeSubmitted.countDown();
}
};
@ -440,11 +452,15 @@ public class TestFlushSnapshotFromClient {
// build descriptions
SnapshotDescription[] descs = new SnapshotDescription[ssNum];
for (int i = 0; i < ssNum; i++) {
SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString());
builder.setName("ss"+i);
builder.setType(SnapshotDescription.Type.FLUSH);
descs[i] = builder.build();
HBaseProtos.SnapshotDescription.Builder builder =
HBaseProtos.SnapshotDescription.newBuilder();
if ((i % 2) == 0) {
descs[i] = new SnapshotDescription("ss" + i, TABLE_NAME.getNameAsString(),
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
} else {
descs[i] = new SnapshotDescription("ss" + i, TABLE2_NAME.getNameAsString(),
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
}
}
// kick each off its own thread
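
Editor's note: async snapshots are now requested with the client-side SnapshotDescription rather than a protobuf builder. A minimal sketch, assuming an Admin admin and a TableName table:

  SnapshotDescription desc =
      new SnapshotDescription("ss0", table.getNameAsString(), SnapshotType.FLUSH);
  admin.takeSnapshotAsync(desc);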

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
import org.apache.hadoop.hbase.testclassification.LargeTests;
@ -116,7 +117,7 @@ public class TestRestoreFlushSnapshotFromClient {
// take a snapshot
admin.snapshot(Bytes.toString(snapshotName0), tableName,
SnapshotDescription.Type.FLUSH);
ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH));
LOG.info("=== after snapshot with 500 rows");
logFSTree();
@ -129,7 +130,7 @@ public class TestRestoreFlushSnapshotFromClient {
// take a snapshot of the updated table
admin.snapshot(Bytes.toString(snapshotName1), tableName,
SnapshotDescription.Type.FLUSH);
ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH));
LOG.info("=== after snapshot with 1000 rows");
logFSTree();
table.close();
@ -194,7 +195,8 @@ public class TestRestoreFlushSnapshotFromClient {
TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotDescription.Type.FLUSH);
admin.snapshot(Bytes.toString(snapshotName2), clonedTableName,
ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH));
UTIL.deleteTable(clonedTableName);
admin.cloneSnapshot(snapshotName2, clonedTableName);

View File

@ -33,12 +33,12 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@ -1856,9 +1856,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@Test
public void testSplitOrMergeStatWhenHBCKAbort() throws Exception {
admin.setSplitOrMergeEnabled(true, false, true,
Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
boolean oldSplit = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
boolean oldMerge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
boolean oldSplit = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
boolean oldMerge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
assertTrue(oldSplit);
assertTrue(oldMerge);
@ -1880,8 +1880,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
spiedHbck.onlineHbck();
spiedHbck.close();
boolean split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
boolean merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
boolean split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
boolean merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
assertFalse(split);
assertFalse(merge);
@ -1892,8 +1892,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
hbck.onlineHbck();
hbck.close();
split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
assertTrue(split);
assertTrue(merge);