HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Contributed by Anu Engineer)

This commit is contained in:
Arpit Agarwal 2016-01-29 11:05:53 -08:00
parent 96fe685b7a
commit 66f0bb646d
8 changed files with 130 additions and 8 deletions

View File

@ -183,4 +183,14 @@ public interface ClientDatanodeProtocol {
* Gets the status of an executing diskbalancer Plan.
*/
WorkStatus queryDiskBalancerPlan() throws IOException;
/**
 * Gets a run-time configuration value from the running diskbalancer
 * instance. For example: the disk balancer bandwidth of a running instance.
 *
 * @param key runtime configuration key (see DiskBalancerConstants for the
 *            well-known keys).
 * @return value of the key as a string.
 * @throws IOException - Throws if there is no such key
 */
String getDiskBalancerSetting(String key) throws IOException;
}

View File

@ -56,6 +56,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Submit
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
import org.apache.hadoop.ipc.ProtobufHelper;
@ -365,8 +367,8 @@ public class ClientDatanodeProtocolTranslatorPB implements
/**
 * Cancels an executing disk balancer plan.
 *
 * @param planID - A SHA512 hash of the plan string.
 * @throws IOException on error
 */
@Override
@ -399,4 +401,17 @@ public class ClientDatanodeProtocolTranslatorPB implements
throw ProtobufHelper.getRemoteException(e);
}
}
/**
 * Retrieves a run-time diskbalancer setting (for example, bandwidth) from
 * the remote datanode over RPC.
 *
 * @param key runtime configuration key to look up.
 * @return the setting's value, or null if the response carries no value.
 * @throws IOException if the remote call fails (unwrapped from the
 *                     underlying ServiceException).
 */
@Override
public String getDiskBalancerSetting(String key) throws IOException {
  final DiskBalancerSettingRequestProto req =
      DiskBalancerSettingRequestProto.newBuilder().setKey(key).build();
  try {
    final DiskBalancerSettingResponseProto resp =
        rpcProxy.getDiskBalancerSetting(NULL_CONTROLLER, req);
    if (resp.hasValue()) {
      return resp.getValue();
    }
    return null;
  } catch (ServiceException e) {
    // Convert to the remote IOException, matching the other RPC wrappers
    // in this translator.
    throw ProtobufHelper.getRemoteException(e);
  }
}
}

View File

@ -197,7 +197,21 @@ message QueryPlanStatusResponseProto {
optional string status = 2;
optional string planID = 3;
optional string currentStatus = 4;
}
/**
 * Request asking the datanode for a specific run-time setting
 * used by the disk balancer.
 */
message DiskBalancerSettingRequestProto {
required string key = 1;
}
/**
 * Response carrying the value of the requested disk balancer setting.
 */
message DiskBalancerSettingResponseProto {
required string value = 1;
}
/**
@ -275,4 +289,9 @@ service ClientDatanodeProtocolService {
*/
rpc queryDiskBalancerPlan(QueryPlanStatusRequestProto)
returns (QueryPlanStatusResponseProto);
/**
 * Gets a run-time setting of the Disk Balancer, such as bandwidth.
 */
rpc getDiskBalancerSetting(DiskBalancerSettingRequestProto)
returns (DiskBalancerSettingResponseProto);
}

View File

@ -21,3 +21,6 @@ HDFS-1312 Change Log
HDFS-9645. DiskBalancer: Add Query RPC. (Anu Engineer via Arpit Agarwal)
HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Anu Engineer
via Arpit Agarwal)

View File

@ -53,7 +53,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Cancel
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
@ -279,7 +280,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
try {
impl.cancelDiskBalancePlan(request.getPlanID());
return CancelPlanResponseProto.newBuilder().build();
}catch (Exception e) {
} catch (Exception e) {
throw new ServiceException(e);
}
}
@ -289,7 +290,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
*/
@Override
public QueryPlanStatusResponseProto queryDiskBalancerPlan(
RpcController controller, QueryPlanStatusRequestProto request)
RpcController controller, QueryPlanStatusRequestProto request)
throws ServiceException {
try {
WorkStatus result = impl.queryDiskBalancerPlan();
@ -304,4 +305,21 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e);
}
}
/**
 * Server-side handler for the getDiskBalancerSetting RPC: looks up a
 * run-time diskbalancer setting (such as bandwidth) on the local
 * implementation and wraps the result in a response proto.
 */
@Override
public DiskBalancerSettingResponseProto getDiskBalancerSetting(
    RpcController controller, DiskBalancerSettingRequestProto request)
    throws ServiceException {
  try {
    final String value = impl.getDiskBalancerSetting(request.getKey());
    final DiskBalancerSettingResponseProto.Builder builder =
        DiskBalancerSettingResponseProto.newBuilder();
    return builder.setValue(value).build();
  } catch (Exception e) {
    // Wrap everything in ServiceException, matching the other translator
    // methods in this class.
    throw new ServiceException(e);
  }
}
}

View File

@ -3322,4 +3322,18 @@ public class DataNode extends ReconfigurableBase
checkSuperuserPrivilege();
throw new DiskbalancerException("Not Implemented", 0);
}
/**
 * Gets a run-time configuration value from running diskbalancer instance. For
 * example : Disk Balancer bandwidth of a running instance.
 *
 * @param key - String that represents the run time key value.
 * @return value of the key as a string.
 * @throws IOException - Throws if there is no such key
 */
@Override
public String getDiskBalancerSetting(String key) throws IOException {
checkSuperuserPrivilege();
// Stub: the diskbalancer engine is not wired up yet, so every lookup
// fails after the superuser check. Result code 0 mirrors the other
// not-implemented diskbalancer entry points in this class.
throw new DiskbalancerException("Not Implemented", 0);
}
}

View File

@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.diskbalancer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * String keys shared between the Disk Balancer client and the datanode
 * for querying run-time settings.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class DiskBalancerConstants {

  /** Key for the disk balancer bandwidth setting. */
  public static final String DISKBALANCER_BANDWIDTH = "DiskBalancerBandwidth";

  /** Key for the volume name the disk balancer operates on. */
  public static final String DISKBALANCER_VOLUME_NAME =
      "DiskBalancerVolumeName";

  /** Constants holder; never instantiated. */
  private DiskBalancerConstants() {
  }
}

View File

@ -60,7 +60,7 @@ public class TestDiskBalancerRPC {
}
@Test
public void TestSubmitTestRpc() throws Exception {
public void testSubmitTestRpc() throws Exception {
final int dnIndex = 0;
cluster.restartDataNode(dnIndex);
cluster.waitActive();
@ -91,7 +91,7 @@ public class TestDiskBalancerRPC {
}
@Test
public void TestCancelTestRpc() throws Exception {
public void testCancelTestRpc() throws Exception {
final int dnIndex = 0;
cluster.restartDataNode(dnIndex);
cluster.waitActive();
@ -122,11 +122,10 @@ public class TestDiskBalancerRPC {
}
thrown.expect(DiskbalancerException.class);
dataNode.cancelDiskBalancePlan(planHash);
}
@Test
public void TestQueryTestRpc() throws Exception {
public void testQueryTestRpc() throws Exception {
final int dnIndex = 0;
cluster.restartDataNode(dnIndex);
cluster.waitActive();
@ -162,4 +161,13 @@ public class TestDiskBalancerRPC {
thrown.expect(DiskbalancerException.class);
dataNode.queryDiskBalancerPlan();
}
// Until the diskbalancer engine is implemented, any settings lookup on the
// datanode is expected to raise DiskbalancerException.
// NOTE(review): method name should be testGetDiskBalancerSetting per
// camelCase convention used by the other renamed tests in this class.
@Test
public void testgetDiskBalancerSetting() throws Exception {
final int dnIndex = 0;
DataNode dataNode = cluster.getDataNodes().get(dnIndex);
thrown.expect(DiskbalancerException.class);
dataNode.getDiskBalancerSetting(
DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
}
}