diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 997f64a13ff..11b07716ec1 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -26,6 +26,7 @@ ServerName metaLocation = null; List servers = null; Set deadServers = null; boolean showAppendWarning = false; +boolean catalogJanitorEnabled = true; String filter = "general"; String format = "html"; @@ -119,6 +120,12 @@ org.apache.hadoop.hbase.HBaseConfiguration; for details. + <%if !catalogJanitorEnabled %> +
+ Please note that your cluster is running with the CatalogJanitor disabled. It can be + re-enabled from the hbase shell by running the command 'catalogjanitor_switch true'.
+

Region Servers

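The warning above points operators at the shell command; the same toggle is also exposed through the client API added later in this patch. A minimal programmatic sketch of the equivalent of `catalogjanitor_switch true` (the class name and configuration setup are illustrative, not part of the patch):

```java
// Sketch: programmatic equivalent of the shell command
// 'catalogjanitor_switch true', using the HBaseAdmin method added
// by this patch. Assumes a client Configuration that can reach the
// cluster; the surrounding class is illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class ReenableCatalogJanitor {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Returns the previous state (EnableCatalogJanitorResponse.prevValue),
      // so callers can restore it later if they want.
      boolean wasEnabled = admin.enableCatalogJanitor(true);
      System.out.println("Catalog janitor was previously "
          + (wasEnabled ? "enabled" : "disabled"));
    } finally {
      admin.close();
    }
  }
}
```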
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java index 012d8cd6dd8..d4952953b41 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java @@ -25,6 +25,8 @@ import org.apache.hadoop.hbase.security.TokenInfo; import org.apache.hadoop.hbase.security.KerberosInfo; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; @@ -35,8 +37,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableR import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; @@ -303,4 +309,41 @@ public interface MasterAdminProtocol extends @Override public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) throws ServiceException; + + /** + * Run a scan of the catalog table + * @param c Unused (set to null). + * @param req CatalogScanRequest + * @return CatalogScanResponse that contains the int return code corresponding + * to the number of entries cleaned + * @throws ServiceException + */ + @Override + public CatalogScanResponse runCatalogScan(RpcController c, + CatalogScanRequest req) throws ServiceException; + + /** + * Enable/Disable the catalog janitor + * @param c Unused (set to null). + * @param req EnableCatalogJanitorRequest that contains:
+ * - enable: If true, enable catalog janitor. If false, disable janitor.
+ * @return EnableCatalogJanitorResponse that contains:
* - prevValue: true, if it was enabled previously; false, otherwise + * @throws ServiceException + */ + @Override + public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c, + EnableCatalogJanitorRequest req) throws ServiceException; + + /** + * Query whether the catalog janitor is enabled + * @param c Unused (set to null). + * @param req IsCatalogJanitorEnabledRequest + * @return IsCatalogJanitorEnabledResponse that contains:
* - value: true, if it is enabled; false, otherwise + * @throws ServiceException + */ + @Override + public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, + IsCatalogJanitorEnabledRequest req) throws ServiceException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 8eed2fa7fb6..b567ec7d04f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -1502,6 +1502,56 @@ public class HBaseAdmin implements Abortable, Closeable { } } + /** + * Enable/Disable the catalog janitor + * @param enable if true enables the catalog janitor + * @return the previous state + * @throws ServiceException + * @throws MasterNotRunningException + */ + public boolean enableCatalogJanitor(boolean enable) + throws ServiceException, MasterNotRunningException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + return master.enableCatalogJanitor(null, + RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue(); + } finally { + master.close(); + } + } + + /** + * Ask for a scan of the catalog table + * @return the number of entries cleaned + * @throws ServiceException + * @throws MasterNotRunningException + */ + public int runCatalogScan() throws ServiceException, MasterNotRunningException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + return master.runCatalogScan(null, + RequestConverter.buildCatalogScanRequest()).getScanResult(); + } finally { + master.close(); + } + } + + /** + * Query the current state of the catalog janitor (enabled or disabled) + * @return true if the catalog janitor is enabled, false otherwise + * @throws ServiceException + * @throws MasterNotRunningException + */ + public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException { + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + try { + return master.isCatalogJanitorEnabled(null, + RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue(); + } finally { + master.close(); + } + } + /** * Split a table or an individual region. * Asynchronous operation.
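Taken together, the three new `HBaseAdmin` methods support a disable/scan/restore maintenance cycle. A sketch of that round trip under the same assumptions as above (class name and setup are illustrative; exception handling elided for brevity):

```java
// Sketch: a disable -> manual scan -> restore cycle using the three
// methods this patch adds to HBaseAdmin. The surrounding class and
// setup are illustrative, not part of the patch.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CatalogJanitorMaintenance {
  public static void main(String[] args) throws Exception {
    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      // Suspend the periodic chore, remembering its previous state.
      boolean prev = admin.enableCatalogJanitor(false);

      // ... do work that must not race with parent-region cleanup ...

      // A manually requested scan still runs while the chore is
      // disabled; the master invokes CatalogJanitor.scan() directly.
      int cleaned = admin.runCatalogScan();
      System.out.println("Manual scan gc'd " + cleaned + " parent region(s)");

      // Put the janitor back the way we found it, and verify.
      admin.enableCatalogJanitor(prev);
      System.out.println("Janitor enabled: " + admin.isCatalogJanitorEnabled());
    } finally {
      admin.close();
    }
  }
}
```

Note the asymmetry this relies on: only the periodic `chore()` checks the enabled flag, while `runCatalogScan` reaches `CatalogJanitor.scan()` directly, which (as the `CatalogJanitor` changes below show) guards only against a concurrently running scan via `alreadyRunning`, returning 0 when one is already in flight.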
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 1492548c22c..2f42d9f5495 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -25,6 +25,7 @@ import java.util.Comparator; import java.util.HashSet; import java.util.Map; import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; @@ -62,7 +63,8 @@ class CatalogJanitor extends Chore { private static final Log LOG = LogFactory.getLog(CatalogJanitor.class.getName()); private final Server server; private final MasterServices services; - private boolean enabled = true; + private AtomicBoolean enabled = new AtomicBoolean(true); + private AtomicBoolean alreadyRunning = new AtomicBoolean(false); CatalogJanitor(final Server server, final MasterServices services) { super(server.getServerName() + "-CatalogJanitor", @@ -75,7 +77,7 @@ class CatalogJanitor extends Chore { @Override protected boolean initialChore() { try { - if (this.enabled) scan(); + if (this.enabled.get()) scan(); } catch (IOException e) { LOG.warn("Failed initial scan of catalog table", e); return false; @@ -86,14 +88,22 @@ class CatalogJanitor extends Chore { /** * @param enabled */ - public void setEnabled(final boolean enabled) { - this.enabled = enabled; + public boolean setEnabled(final boolean enabled) { + return this.enabled.getAndSet(enabled); + } + + boolean getEnabled() { + return this.enabled.get(); } @Override protected void chore() { try { - scan(); + if (this.enabled.get()) { + scan(); + } else { + LOG.warn("CatalogJanitor disabled! Not running scan."); + } } catch (IOException e) { LOG.warn("Failed scan of catalog table", e); } @@ -135,30 +145,37 @@ class CatalogJanitor extends Chore { * @throws IOException */ int scan() throws IOException { - Pair> pair = getSplitParents(); - int count = pair.getFirst(); - Map splitParents = pair.getSecond(); - - // Now work on our list of found parents. See if any we can clean up. - int cleaned = 0; - HashSet parentNotCleaned = new HashSet(); //regions whose parents are still around - for (Map.Entry e : splitParents.entrySet()) { - if (!parentNotCleaned.contains(e.getKey()) && cleanParent(e.getKey(), e.getValue())) { - cleaned++; - } else { - // We could not clean the parent, so it's daughters should not be cleaned either (HBASE-6160) - parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITA_QUALIFIER)); - parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITB_QUALIFIER)); + try { + if (!alreadyRunning.compareAndSet(false, true)) { + return 0; } + Pair> pair = getSplitParents(); + int count = pair.getFirst(); + Map splitParents = pair.getSecond(); + + // Now work on our list of found parents. See if any we can clean up. 
+ int cleaned = 0; + HashSet parentNotCleaned = new HashSet(); //regions whose parents are still around + for (Map.Entry e : splitParents.entrySet()) { + if (!parentNotCleaned.contains(e.getKey()) && cleanParent(e.getKey(), e.getValue())) { + cleaned++; + } else { + // We could not clean the parent, so its daughters should not be cleaned either (HBASE-6160) + parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITA_QUALIFIER)); + parentNotCleaned.add(getDaughterRegionInfo(e.getValue(), HConstants.SPLITB_QUALIFIER)); + } + } + if (cleaned != 0) { + LOG.info("Scanned " + count + " catalog row(s) and gc'd " + cleaned + + " unreferenced parent region(s)"); + } else if (LOG.isDebugEnabled()) { + LOG.debug("Scanned " + count + " catalog row(s) and gc'd " + cleaned + + " unreferenced parent region(s)"); + } + return cleaned; + } finally { + alreadyRunning.set(false); } - if (cleaned != 0) { - LOG.info("Scanned " + count + " catalog row(s) and gc'd " + cleaned + - " unreferenced parent region(s)"); - } else if (LOG.isDebugEnabled()) { - LOG.debug("Scanned " + count + " catalog row(s) and gc'd " + cleaned + - " unreferenced parent region(s)"); - } - return cleaned; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a97a4337f77..b48ba9769ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.MasterAdminProtocol; import org.apache.hadoop.hbase.RegionServerStatusProtocol; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.ipc.ProtocolSignature; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; @@ -130,6 +131,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnReq import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; @@ -138,8 +141,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableR import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; import
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; @@ -1156,6 +1163,30 @@ Server { return IsMasterRunningResponse.newBuilder().setIsMasterRunning(isMasterRunning()).build(); } + @Override + public CatalogScanResponse runCatalogScan(RpcController c, + CatalogScanRequest req) throws ServiceException { + try { + return ResponseConverter.buildCatalogScanResponse(catalogJanitorChore.scan()); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + } + + @Override + public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c, + EnableCatalogJanitorRequest req) throws ServiceException { + return EnableCatalogJanitorResponse.newBuilder(). + setPrevValue(catalogJanitorChore.setEnabled(req.getEnable())).build(); + } + + @Override + public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, + IsCatalogJanitorEnabledRequest req) throws ServiceException { + return IsCatalogJanitorEnabledResponse.newBuilder(). + setValue(catalogJanitorChore.getEnabled()).build(); + } + /** * @return Maximum time we should run balancer for */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java index a0aaaf420cc..bc205b98282 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java @@ -36,7 +36,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl; +import com.google.protobuf.ServiceException; /** * The servlet responsible for rendering the index page of the @@ -65,13 +67,20 @@ public class MasterStatusServlet extends HttpServlet { Set deadServers = master.getServerManager().getDeadServers(); response.setContentType("text/html"); - MasterStatusTmpl tmpl = new MasterStatusTmpl() + MasterStatusTmpl tmpl; + try { + tmpl = new MasterStatusTmpl() .setFrags(frags) .setShowAppendWarning(shouldShowAppendWarning(conf)) .setRootLocation(rootLocation) .setMetaLocation(metaLocation) .setServers(servers) - .setDeadServers(deadServers); + .setDeadServers(deadServers) + .setCatalogJanitorEnabled(master.isCatalogJanitorEnabled(null, + RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue()); + } catch (ServiceException s) { + throw new IOException(s); + } if (request.getParameter("filter") != null) tmpl.setFilter(request.getParameter("filter")); if (request.getParameter("format") != null) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 95f2e74ec73..535417cbb1a 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -82,11 +82,14 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; @@ -1111,4 +1114,28 @@ public final class RequestConverter { public static GetClusterStatusRequest buildGetClusterStatusRequest() { return GetClusterStatusRequest.newBuilder().build(); } + + /** + * Creates a request for running a catalog scan + * @return A {@link CatalogScanRequest} + */ + public static CatalogScanRequest buildCatalogScanRequest() { + return CatalogScanRequest.newBuilder().build(); + } + + /** + * Creates a request for enabling/disabling the catalog janitor + * @return A {@link EnableCatalogJanitorRequest} + */ + public static EnableCatalogJanitorRequest buildEnableCatalogJanitorRequest(boolean enable) { + return EnableCatalogJanitorRequest.newBuilder().setEnable(enable).build(); + } + + /** + * Creates a request for querying the master whether the catalog janitor is enabled + * @return A {@link IsCatalogJanitorEnabledRequest} + */ + public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() { + return IsCatalogJanitorEnabledRequest.newBuilder().build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index dc15f7f509b..8fc5ab535d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; 
import org.apache.hadoop.util.StringUtils; @@ -219,5 +221,21 @@ public final class ResponseConverter { return builder.build(); } + /** + * Creates a response for the catalog scan request + * @return A CatalogScanResponse + */ + public static CatalogScanResponse buildCatalogScanResponse(int numCleaned) { + return CatalogScanResponse.newBuilder().setScanResult(numCleaned).build(); + } + + /** + * Creates a response for the enable catalog janitor request + * @return An EnableCatalogJanitorResponse + */ + public static EnableCatalogJanitorResponse buildEnableCatalogJanitorResponse(boolean prevValue) { + return EnableCatalogJanitorResponse.newBuilder().setPrevValue(prevValue).build(); + } + + // End utilities for Admin } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java index 4f80e91da5e..13088d923d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java @@ -12264,6 +12264,2106 @@ public final class MasterAdminProtos { // @@protoc_insertion_point(class_scope:SetBalancerRunningResponse) } + public interface CatalogScanRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class CatalogScanRequest extends + com.google.protobuf.GeneratedMessage + implements CatalogScanRequestOrBuilder { + // Use CatalogScanRequest.newBuilder() to construct. + private CatalogScanRequest(Builder builder) { + super(builder); + } + private CatalogScanRequest(boolean noInit) {} + + private static final CatalogScanRequest defaultInstance; + public static CatalogScanRequest getDefaultInstance() { + return defaultInstance; + } + + public CatalogScanRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)) { + return super.equals(obj); + } + 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return 
Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:CatalogScanRequest) + } + + static { + defaultInstance = new CatalogScanRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CatalogScanRequest) + } + + public interface CatalogScanResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int32 scanResult = 1; + boolean hasScanResult(); + int getScanResult(); + } + public static final class CatalogScanResponse extends + com.google.protobuf.GeneratedMessage + implements CatalogScanResponseOrBuilder { + // Use CatalogScanResponse.newBuilder() to construct. + private CatalogScanResponse(Builder builder) { + super(builder); + } + private CatalogScanResponse(boolean noInit) {} + + private static final CatalogScanResponse defaultInstance; + public static CatalogScanResponse getDefaultInstance() { + return defaultInstance; + } + + public CatalogScanResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanResponse_fieldAccessorTable; + } + + private int bitField0_; + // optional int32 scanResult = 1; + public static final int SCANRESULT_FIELD_NUMBER = 1; + private int scanResult_; + public boolean hasScanResult() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getScanResult() { + return scanResult_; + } + + private void initFields() { + scanResult_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, scanResult_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) 
== 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, scanResult_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse) obj; + + boolean result = true; + result = result && (hasScanResult() == other.hasScanResult()); + if (hasScanResult()) { + result = result && (getScanResult() + == other.getScanResult()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasScanResult()) { + hash = (37 * hash) + SCANRESULT_FIELD_NUMBER; + hash = (53 * hash) + getScanResult(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CatalogScanResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + scanResult_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + 
} + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.scanResult_ = scanResult_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance()) return this; + if (other.hasScanResult()) { + setScanResult(other.getScanResult()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + scanResult_ = input.readInt32(); + break; + } + } + } + } + + private int bitField0_; + + // optional int32 scanResult = 1; + private int scanResult_ ; + public boolean hasScanResult() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public int getScanResult() { + return scanResult_; + } + public Builder setScanResult(int value) { + bitField0_ |= 0x00000001; + scanResult_ = value; + onChanged(); + return this; + } + public Builder clearScanResult() { + bitField0_ = (bitField0_ & ~0x00000001); + scanResult_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:CatalogScanResponse) + } + + static { + defaultInstance = new CatalogScanResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CatalogScanResponse) + } + + public interface EnableCatalogJanitorRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool enable = 1; + boolean hasEnable(); + boolean getEnable(); + } + public static final class EnableCatalogJanitorRequest extends + 
com.google.protobuf.GeneratedMessage + implements EnableCatalogJanitorRequestOrBuilder { + // Use EnableCatalogJanitorRequest.newBuilder() to construct. + private EnableCatalogJanitorRequest(Builder builder) { + super(builder); + } + private EnableCatalogJanitorRequest(boolean noInit) {} + + private static final EnableCatalogJanitorRequest defaultInstance; + public static EnableCatalogJanitorRequest getDefaultInstance() { + return defaultInstance; + } + + public EnableCatalogJanitorRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorRequest_fieldAccessorTable; + } + + private int bitField0_; + // required bool enable = 1; + public static final int ENABLE_FIELD_NUMBER = 1; + private boolean enable_; + public boolean hasEnable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getEnable() { + return enable_; + } + + private void initFields() { + enable_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasEnable()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, enable_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, enable_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest) obj; + + boolean result = true; + result = result && (hasEnable() == other.hasEnable()); + if (hasEnable()) { + result = result && (getEnable() + == other.getEnable()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasEnable()) { + hash = (37 * hash) + ENABLE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getEnable()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public 
static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + enable_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.enable_ = enable_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance()) return this; + if (other.hasEnable()) { + setEnable(other.getEnable()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasEnable()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + enable_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // required bool enable = 1; + private boolean enable_ ; + public boolean hasEnable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getEnable() { + return enable_; + } + public Builder setEnable(boolean value) { + bitField0_ |= 0x00000001; + enable_ = value; + onChanged(); + return this; + } + public Builder clearEnable() { + bitField0_ = (bitField0_ & ~0x00000001); + enable_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:EnableCatalogJanitorRequest) + } + + static { + defaultInstance = new EnableCatalogJanitorRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EnableCatalogJanitorRequest) + } + + public interface EnableCatalogJanitorResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bool prevValue = 1; + boolean hasPrevValue(); + boolean getPrevValue(); + } + public static final class EnableCatalogJanitorResponse extends + com.google.protobuf.GeneratedMessage + implements EnableCatalogJanitorResponseOrBuilder { + // Use EnableCatalogJanitorResponse.newBuilder() to construct. 
+ private EnableCatalogJanitorResponse(Builder builder) { + super(builder); + } + private EnableCatalogJanitorResponse(boolean noInit) {} + + private static final EnableCatalogJanitorResponse defaultInstance; + public static EnableCatalogJanitorResponse getDefaultInstance() { + return defaultInstance; + } + + public EnableCatalogJanitorResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorResponse_fieldAccessorTable; + } + + private int bitField0_; + // optional bool prevValue = 1; + public static final int PREVVALUE_FIELD_NUMBER = 1; + private boolean prevValue_; + public boolean hasPrevValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getPrevValue() { + return prevValue_; + } + + private void initFields() { + prevValue_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, prevValue_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, prevValue_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse) obj; + + boolean result = true; + result = result && (hasPrevValue() == other.hasPrevValue()); + if (hasPrevValue()) { + result = result && (getPrevValue() + == other.getPrevValue()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPrevValue()) { + hash = (37 * hash) + PREVVALUE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getPrevValue()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + 
com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_EnableCatalogJanitorResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + prevValue_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.prevValue_ = prevValue_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance()) return this; + if (other.hasPrevValue()) { + setPrevValue(other.getPrevValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + prevValue_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // optional bool prevValue = 1; + private boolean prevValue_ ; + public boolean hasPrevValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getPrevValue() { + return prevValue_; + } + public Builder setPrevValue(boolean value) { + bitField0_ |= 0x00000001; + prevValue_ = value; + onChanged(); + return this; + } + public Builder clearPrevValue() { + bitField0_ = (bitField0_ & ~0x00000001); + prevValue_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:EnableCatalogJanitorResponse) + } + + static { + defaultInstance = new EnableCatalogJanitorResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EnableCatalogJanitorResponse) + } + + public interface IsCatalogJanitorEnabledRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class IsCatalogJanitorEnabledRequest extends + com.google.protobuf.GeneratedMessage + implements IsCatalogJanitorEnabledRequestOrBuilder { + // Use IsCatalogJanitorEnabledRequest.newBuilder() to construct. 
+ private IsCatalogJanitorEnabledRequest(Builder builder) { + super(builder); + } + private IsCatalogJanitorEnabledRequest(boolean noInit) {} + + private static final IsCatalogJanitorEnabledRequest defaultInstance; + public static IsCatalogJanitorEnabledRequest getDefaultInstance() { + return defaultInstance; + } + + public IsCatalogJanitorEnabledRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable; + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + 
this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:IsCatalogJanitorEnabledRequest) + } + + static { + defaultInstance = new IsCatalogJanitorEnabledRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:IsCatalogJanitorEnabledRequest) + } + + public interface IsCatalogJanitorEnabledResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool value = 1; + boolean hasValue(); + boolean getValue(); + } + public static final class IsCatalogJanitorEnabledResponse extends + com.google.protobuf.GeneratedMessage + implements IsCatalogJanitorEnabledResponseOrBuilder { + // Use IsCatalogJanitorEnabledResponse.newBuilder() to construct. + private IsCatalogJanitorEnabledResponse(Builder builder) { + super(builder); + } + private IsCatalogJanitorEnabledResponse(boolean noInit) {} + + private static final IsCatalogJanitorEnabledResponse defaultInstance; + public static IsCatalogJanitorEnabledResponse getDefaultInstance() { + return defaultInstance; + } + + public IsCatalogJanitorEnabledResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable; + } + + private int bitField0_; + // required bool value = 1; + public static final int VALUE_FIELD_NUMBER = 1; + private boolean value_; + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getValue() { + return value_; + } + + private void initFields() { + value_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse other = 
(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse) obj; + + boolean result = true; + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && (getValue() + == other.getValue()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getValue()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + value_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()) return this; + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasValue()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + value_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // required bool value = 1; + private boolean value_ ; + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public boolean getValue() { + return value_; + } + public Builder setValue(boolean value) { + bitField0_ |= 0x00000001; + value_ = value; + onChanged(); + return this; + } + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000001); + value_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:IsCatalogJanitorEnabledResponse) + } + + static { + defaultInstance = new IsCatalogJanitorEnabledResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:IsCatalogJanitorEnabledResponse) + } + public static abstract class MasterAdminService implements com.google.protobuf.Service { protected MasterAdminService() {} @@ -12349,6 +14449,21 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest request, com.google.protobuf.RpcCallback done); + public abstract void runCatalogScan( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void enableCatalogJanitor( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void isCatalogJanitorEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -12482,6 +14597,30 @@ public final class MasterAdminProtos { impl.setBalancerRunning(controller, request, done); } + @java.lang.Override + public void runCatalogScan( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request, + com.google.protobuf.RpcCallback done) { + impl.runCatalogScan(controller, request, done); + } + + @java.lang.Override + public void enableCatalogJanitor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request, + com.google.protobuf.RpcCallback done) { + impl.enableCatalogJanitor(controller, request, done); + } + + @java.lang.Override + public void isCatalogJanitorEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request, + com.google.protobuf.RpcCallback done) { + impl.isCatalogJanitorEnabled(controller, request, done); + } + }; } @@ -12536,6 +14675,12 @@ public final class MasterAdminProtos { return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest)request); case 15: return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest)request); + case 16: + return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)request); + case 17: + return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)request); + case 18: + return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -12582,6 +14727,12 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.getDefaultInstance(); case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.getDefaultInstance(); + case 16: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -12628,6 +14779,12 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance(); case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance(); + case 16: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -12716,6 +14873,21 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest request, com.google.protobuf.RpcCallback done); + public abstract void runCatalogScan( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void enableCatalogJanitor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void isCatalogJanitorEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -12818,6 +14990,21 @@ public final class MasterAdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 16: + this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 17: + this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 18: + this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -12864,6 +15051,12 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.getDefaultInstance(); case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.getDefaultInstance(); + case 16: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance(); + case 17: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -12910,6 +15103,12 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance(); case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance(); + case 16: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(); + case 17: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -13170,6 +15369,51 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance())); } + + public void runCatalogScan( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(16), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance())); + } + + public void enableCatalogJanitor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance())); + } + + public void isCatalogJanitorEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(18), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -13257,6 +15501,21 @@ public final class MasterAdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse runCatalogScan( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse enableCatalogJanitor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request) + throws 
com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -13457,6 +15716,42 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse runCatalogScan( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(16), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse enableCatalogJanitor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(17), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(18), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); + } + } } @@ -13620,6 +15915,36 @@ public final class MasterAdminProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SetBalancerRunningResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CatalogScanRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CatalogScanRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CatalogScanResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CatalogScanResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EnableCatalogJanitorRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EnableCatalogJanitorRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EnableCatalogJanitorResponse_descriptor; 
+ private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EnableCatalogJanitorResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_IsCatalogJanitorEnabledRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_IsCatalogJanitorEnabledResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -13663,33 +15988,45 @@ public final class MasterAdminProtos { "erRan\030\001 \002(\010\"<\n\031SetBalancerRunningRequest" + "\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"6\n\032Set" + "BalancerRunningResponse\022\030\n\020prevBalanceVa" + - "lue\030\001 \001(\0102\303\007\n\022MasterAdminService\0222\n\taddC" + - "olumn\022\021.AddColumnRequest\032\022.AddColumnResp" + - "onse\022;\n\014deleteColumn\022\024.DeleteColumnReque" + - "st\032\025.DeleteColumnResponse\022;\n\014modifyColum" + - "n\022\024.ModifyColumnRequest\032\025.ModifyColumnRe" + - "sponse\0225\n\nmoveRegion\022\022.MoveRegionRequest", - "\032\023.MoveRegionResponse\022;\n\014assignRegion\022\024." + - "AssignRegionRequest\032\025.AssignRegionRespon" + - "se\022A\n\016unassignRegion\022\026.UnassignRegionReq" + - "uest\032\027.UnassignRegionResponse\022>\n\roffline" + - "Region\022\025.OfflineRegionRequest\032\026.OfflineR" + - "egionResponse\0228\n\013deleteTable\022\023.DeleteTab" + - "leRequest\032\024.DeleteTableResponse\0228\n\013enabl" + - "eTable\022\023.EnableTableRequest\032\024.EnableTabl" + - "eResponse\022;\n\014disableTable\022\024.DisableTable" + - "Request\032\025.DisableTableResponse\0228\n\013modify", - "Table\022\023.ModifyTableRequest\032\024.ModifyTable" + - "Response\0228\n\013createTable\022\023.CreateTableReq" + - "uest\032\024.CreateTableResponse\022/\n\010shutdown\022\020" + - ".ShutdownRequest\032\021.ShutdownResponse\0225\n\ns" + - "topMaster\022\022.StopMasterRequest\032\023.StopMast" + - "erResponse\022,\n\007balance\022\017.BalanceRequest\032\020" + - ".BalanceResponse\022M\n\022setBalancerRunning\022\032" + - ".SetBalancerRunningRequest\032\033.SetBalancer" + - "RunningResponseBG\n*org.apache.hadoop.hba" + - "se.protobuf.generatedB\021MasterAdminProtos", - "H\001\210\001\001\240\001\001" + "lue\030\001 \001(\010\"\024\n\022CatalogScanRequest\")\n\023Catal" + + "ogScanResponse\022\022\n\nscanResult\030\001 \001(\005\"-\n\033En" + + "ableCatalogJanitorRequest\022\016\n\006enable\030\001 \002(" + + "\010\"1\n\034EnableCatalogJanitorResponse\022\021\n\tpre" + + "vValue\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledR" + + "equest\"0\n\037IsCatalogJanitorEnabledRespons", + "e\022\r\n\005value\030\001 \002(\0102\263\t\n\022MasterAdminService\022" + + "2\n\taddColumn\022\021.AddColumnRequest\032\022.AddCol" + + "umnResponse\022;\n\014deleteColumn\022\024.DeleteColu" + + "mnRequest\032\025.DeleteColumnResponse\022;\n\014modi" + + "fyColumn\022\024.ModifyColumnRequest\032\025.ModifyC" + + "olumnResponse\0225\n\nmoveRegion\022\022.MoveRegion" + + "Request\032\023.MoveRegionResponse\022;\n\014assignRe" + + "gion\022\024.AssignRegionRequest\032\025.AssignRegio" + + "nResponse\022A\n\016unassignRegion\022\026.UnassignRe" + + 
"gionRequest\032\027.UnassignRegionResponse\022>\n\r", + "offlineRegion\022\025.OfflineRegionRequest\032\026.O" + + "fflineRegionResponse\0228\n\013deleteTable\022\023.De" + + "leteTableRequest\032\024.DeleteTableResponse\0228" + + "\n\013enableTable\022\023.EnableTableRequest\032\024.Ena" + + "bleTableResponse\022;\n\014disableTable\022\024.Disab" + + "leTableRequest\032\025.DisableTableResponse\0228\n" + + "\013modifyTable\022\023.ModifyTableRequest\032\024.Modi" + + "fyTableResponse\0228\n\013createTable\022\023.CreateT" + + "ableRequest\032\024.CreateTableResponse\022/\n\010shu" + + "tdown\022\020.ShutdownRequest\032\021.ShutdownRespon", + "se\0225\n\nstopMaster\022\022.StopMasterRequest\032\023.S" + + "topMasterResponse\022,\n\007balance\022\017.BalanceRe" + + "quest\032\020.BalanceResponse\022M\n\022setBalancerRu" + + "nning\022\032.SetBalancerRunningRequest\032\033.SetB" + + "alancerRunningResponse\022;\n\016runCatalogScan" + + "\022\023.CatalogScanRequest\032\024.CatalogScanRespo" + + "nse\022S\n\024enableCatalogJanitor\022\034.EnableCata" + + "logJanitorRequest\032\035.EnableCatalogJanitor" + + "Response\022\\\n\027isCatalogJanitorEnabled\022\037.Is" + + "CatalogJanitorEnabledRequest\032 .IsCatalog", + "JanitorEnabledResponseBG\n*org.apache.had" + + "oop.hbase.protobuf.generatedB\021MasterAdmi" + + "nProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -13952,6 +16289,54 @@ public final class MasterAdminProtos { new java.lang.String[] { "PrevBalanceValue", }, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.Builder.class); + internal_static_CatalogScanRequest_descriptor = + getDescriptor().getMessageTypes().get(32); + internal_static_CatalogScanRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CatalogScanRequest_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.Builder.class); + internal_static_CatalogScanResponse_descriptor = + getDescriptor().getMessageTypes().get(33); + internal_static_CatalogScanResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CatalogScanResponse_descriptor, + new java.lang.String[] { "ScanResult", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.Builder.class); + internal_static_EnableCatalogJanitorRequest_descriptor = + getDescriptor().getMessageTypes().get(34); + internal_static_EnableCatalogJanitorRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EnableCatalogJanitorRequest_descriptor, + new java.lang.String[] { "Enable", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.Builder.class); + internal_static_EnableCatalogJanitorResponse_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_EnableCatalogJanitorResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+          internal_static_EnableCatalogJanitorResponse_descriptor,
+          new java.lang.String[] { "PrevValue", },
+          org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.Builder.class);
+      internal_static_IsCatalogJanitorEnabledRequest_descriptor =
+        getDescriptor().getMessageTypes().get(36);
+      internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new
+        com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+          internal_static_IsCatalogJanitorEnabledRequest_descriptor,
+          new java.lang.String[] { },
+          org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.class,
+          org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.Builder.class);
+      internal_static_IsCatalogJanitorEnabledResponse_descriptor =
+        getDescriptor().getMessageTypes().get(37);
+      internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new
+        com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+          internal_static_IsCatalogJanitorEnabledResponse_descriptor,
+          new java.lang.String[] { "Value", },
+          org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.Builder.class);
       return null;
     }
   };
diff --git a/hbase-server/src/main/protobuf/MasterAdmin.proto b/hbase-server/src/main/protobuf/MasterAdmin.proto
index 00859145432..f3e193ed58e 100644
--- a/hbase-server/src/main/protobuf/MasterAdmin.proto
+++ b/hbase-server/src/main/protobuf/MasterAdmin.proto
@@ -154,6 +154,28 @@ message SetBalancerRunningResponse {
   optional bool prevBalanceValue = 1;
 }
 
+message CatalogScanRequest {
+}
+
+message CatalogScanResponse {
+  optional int32 scanResult = 1;
+}
+
+message EnableCatalogJanitorRequest {
+  required bool enable = 1;
+}
+
+message EnableCatalogJanitorResponse {
+  optional bool prevValue = 1;
+}
+
+message IsCatalogJanitorEnabledRequest {
+}
+
+message IsCatalogJanitorEnabledResponse {
+  required bool value = 1;
+}
+
 service MasterAdminService {
   /** Adds a column to the specified table. */
   rpc addColumn(AddColumnRequest)
@@ -236,4 +258,19 @@ service MasterAdminService {
   rpc setBalancerRunning(SetBalancerRunningRequest)
     returns(SetBalancerRunningResponse);
 
+  /** Trigger a run of the catalog janitor's scan of the catalog table. */
+  rpc runCatalogScan(CatalogScanRequest)
+    returns(CatalogScanResponse);
+
+  /**
+   * Turn the catalog janitor on or off.
+   */
+  rpc enableCatalogJanitor(EnableCatalogJanitorRequest)
+    returns(EnableCatalogJanitorResponse);
+
+  /**
+   * Query whether the catalog janitor is enabled.
+   */
+  rpc isCatalogJanitorEnabled(IsCatalogJanitorEnabledRequest)
+    returns(IsCatalogJanitorEnabledResponse);
 }
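On the wire these are plain request/response pairs; on the client they surface as
methods on HBaseAdmin, which is what the Ruby wrapper below calls
(@admin.runCatalogScan and friends). A minimal sketch of driving the three calls
from Java; the connection and cleanup scaffolding here is illustrative, not
something this patch prescribes:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class CatalogJanitorClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          // enableCatalogJanitor hands back the previous switch value,
          // so the prior state can be restored afterwards.
          boolean wasEnabled = admin.enableCatalogJanitor(false);

          // ... work that must not race with catalog garbage collection ...

          admin.enableCatalogJanitor(wasEnabled);
          if (admin.isCatalogJanitorEnabled()) {
            // runCatalogScan returns the number of catalog entries cleaned
            // (the int32 scanResult from CatalogScanResponse, unwrapped).
            int cleaned = admin.runCatalogScan();
            System.out.println("entries cleaned: " + cleaned);
          }
        } finally {
          admin.close();
        }
      }
    }

Because CatalogScanResponse carries only the scanResult count, the client wrapper
can return a bare int, which keeps the Ruby shell glue below trivially thin.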
diff --git a/hbase-server/src/main/ruby/hbase/admin.rb b/hbase-server/src/main/ruby/hbase/admin.rb
index 95995f1aaa0..6239d5213ba 100644
--- a/hbase-server/src/main/ruby/hbase/admin.rb
+++ b/hbase-server/src/main/ruby/hbase/admin.rb
@@ -92,6 +92,27 @@ module Hbase
         java.lang.Boolean::valueOf(enableDisable), java.lang.Boolean::valueOf(false))
     end
 
+    #----------------------------------------------------------------------------------------------
+    # Request a scan of the catalog table (for garbage collection)
+    # Returns the number of catalog entries cleaned, as an int
+    def catalogjanitor_run()
+      @admin.runCatalogScan()
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Enable/disable the catalog janitor
+    # Returns the previous catalog janitor switch setting.
+    def catalogjanitor_switch(enableDisable)
+      @admin.enableCatalogJanitor(java.lang.Boolean::valueOf(enableDisable))
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Query the catalog janitor state (enabled or disabled)
+    # Returns the catalog janitor state (true means enabled).
+    def catalogjanitor_enabled()
+      @admin.isCatalogJanitorEnabled()
+    end
+
     #----------------------------------------------------------------------------------------------
     # Enables a table
     def enable(table_name)
diff --git a/hbase-server/src/main/ruby/shell.rb b/hbase-server/src/main/ruby/shell.rb
index b3cd581e6bc..d02096920c9 100644
--- a/hbase-server/src/main/ruby/shell.rb
+++ b/hbase-server/src/main/ruby/shell.rb
@@ -287,6 +287,9 @@ Shell.load_command_group(
     unassign
     zk_dump
     hlog_roll
+    catalogjanitor_run
+    catalogjanitor_switch
+    catalogjanitor_enabled
   ]
 )
 
diff --git a/hbase-server/src/main/ruby/shell/commands/catalogjanitor_enabled.rb b/hbase-server/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
new file mode 100644
index 00000000000..b310c3a7ab0
--- /dev/null
+++ b/hbase-server/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class CatalogjanitorEnabled < Command
+      def help
+        return <<-EOF
+Query the CatalogJanitor state (enabled or disabled).
+Examples:
+
+  hbase> catalogjanitor_enabled
+EOF
+      end
+
+      def command()
+        format_simple_command do
+          formatter.row([
+            admin.catalogjanitor_enabled() ? "true" : "false"
+          ])
+        end
+      end
+    end
+  end
+end
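Wired into shell.rb's command group, the three wrappers give operators shell-level
control of the janitor. An illustrative session, assuming the janitor starts out
enabled; timing lines are omitted and value formatting is approximate:

    hbase> catalogjanitor_enabled
    true
    hbase> catalogjanitor_run
    hbase> catalogjanitor_switch false
    true
    hbase> catalogjanitor_enabled
    false
    hbase> catalogjanitor_switch true
    false

Note that catalogjanitor_switch echoes the previous state
(EnableCatalogJanitorResponse.prevValue), matching the convention the service
already uses for setBalancerRunning and its prevBalanceValue.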
"true" : "false" + ]) + end + end + end + end +end diff --git a/hbase-server/src/main/ruby/shell/commands/catalogjanitor_run.rb b/hbase-server/src/main/ruby/shell/commands/catalogjanitor_run.rb new file mode 100644 index 00000000000..03426cb4f14 --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/catalogjanitor_run.rb @@ -0,0 +1,37 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class CatalogjanitorRun < Command + def help + return <<-EOF +Catalog janitor command to run the (garbage collection) scan from command line. + + hbase> catalogjanitor_run + +EOF + end + def command() + format_simple_command do + admin.catalogjanitor_run() + end + end + end + end +end diff --git a/hbase-server/src/main/ruby/shell/commands/catalogjanitor_switch.rb b/hbase-server/src/main/ruby/shell/commands/catalogjanitor_switch.rb new file mode 100644 index 00000000000..fce1925eecb --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/catalogjanitor_switch.rb @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class CatalogjanitorSwitch < Command + def help + return <<-EOF +Enable/Disable CatalogJanitor. Returns previous CatalogJanitor state. +Examples: + + hbase> catalogjanitor_switch true + hbase> catalogjanitor_switch false +EOF + end + + def command(enableDisable) + format_simple_command do + formatter.row([ + admin.catalogjanitor_switch(enableDisable)? "true" : "false" + ]) + end + end + end + end +end