HDFS-14983. RBF: Add dfsrouteradmin -refreshSuperUserGroupsConfiguration command option. Contributed by Xieming Li
Parent: b56c08b2b7
Commit: 93bb368094
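For context, here is a minimal sketch (not part of this commit) of how the new admin call can be driven programmatically through the pieces added below: `RouterClient#getRouterGenericManager()` and `RouterGenericManager#refreshSuperUserGroupsConfiguration()`. The `RouterClient` constructor arguments and the use of the router admin address key mirror what `RouterAdmin` does, but treat them as assumptions rather than the commit's exact API surface.

```java
// Hedged sketch, not from the commit: refresh superuser proxy groups on a
// Router via the admin protocol wired up in this change.
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager;
import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
import org.apache.hadoop.hdfs.server.federation.router.RouterClient;
import org.apache.hadoop.net.NetUtils;

public class RefreshSuperUserGroupsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Router admin address; key and default come from RBFConfigKeys.
    String address = conf.getTrimmed(
        RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
        RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
    InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);

    // RouterClient implements Closeable; constructor signature assumed to
    // match the (InetSocketAddress, Configuration) form used by RouterAdmin.
    try (RouterClient client = new RouterClient(routerSocket, conf)) {
      RouterGenericManager manager = client.getRouterGenericManager();
      if (manager.refreshSuperUserGroupsConfiguration()) {
        System.out.println("Superuser proxy groups refreshed on " + address);
      }
    }
  }
}
```

The equivalent CLI invocation added by this commit is `hdfs dfsrouteradmin -refreshSuperUserGroupsConfiguration`.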
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager;
 import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
 import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
 import org.apache.hadoop.ipc.GenericRefreshProtocol;
@@ -30,5 +31,6 @@ import org.apache.hadoop.ipc.GenericRefreshProtocol;
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public interface RouterAdminProtocol extends MountTableManager,
-    RouterStateManager, NameserviceManager, GenericRefreshProtocol {
+    RouterStateManager, NameserviceManager, GenericRefreshProtocol,
+    RouterGenericManager {
 }
@@ -45,6 +45,8 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProt
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationResponseProto;
 import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
@@ -54,6 +56,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameservice
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnableNameserviceResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshSuperUserGroupsConfigurationResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationRequest;
@@ -90,6 +93,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafe
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
@@ -197,6 +201,28 @@ public class RouterAdminProtocolServerSideTranslatorPB implements
     }
   }
+
+  /**
+   * Refresh superuser proxy groups mappings.
+   */
+  @Override
+  public RefreshSuperUserGroupsConfigurationResponseProto
+      refreshSuperUserGroupsConfiguration(
+      RpcController controller,
+      RefreshSuperUserGroupsConfigurationRequestProto request)
+      throws ServiceException {
+    try {
+      boolean result = server.refreshSuperUserGroupsConfiguration();
+      RefreshSuperUserGroupsConfigurationResponse response =
+          RefreshSuperUserGroupsConfigurationResponsePBImpl.newInstance(result);
+      RefreshSuperUserGroupsConfigurationResponsePBImpl responsePB =
+          (RefreshSuperUserGroupsConfigurationResponsePBImpl) response;
+      return responsePB.getProto();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
 
   @Override
   public EnterSafeModeResponseProto enterSafeMode(RpcController controller,
       EnterSafeModeRequestProto request) throws ServiceException {
@@ -42,11 +42,14 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProt
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager;
 import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
 import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
@@ -89,6 +92,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeMo
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
@@ -110,7 +114,8 @@ import com.google.protobuf.ServiceException;
 @InterfaceStability.Stable
 public class RouterAdminProtocolTranslatorPB
     implements ProtocolMetaInterface, MountTableManager,
-    Closeable, ProtocolTranslator, RouterStateManager, NameserviceManager {
+    Closeable, ProtocolTranslator, RouterStateManager, NameserviceManager,
+    RouterGenericManager {
   final private RouterAdminProtocolPB rpcProxy;
 
   public RouterAdminProtocolTranslatorPB(RouterAdminProtocolPB proxy) {
@@ -309,4 +314,18 @@ public class RouterAdminProtocolTranslatorPB
       throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
     }
   }
+
+  @Override
+  public boolean refreshSuperUserGroupsConfiguration() throws IOException {
+    RefreshSuperUserGroupsConfigurationRequestProto proto =
+        RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();
+    try {
+      RefreshSuperUserGroupsConfigurationResponseProto response =
+          rpcProxy.refreshSuperUserGroupsConfiguration(null, proto);
+      return new RefreshSuperUserGroupsConfigurationResponsePBImpl(response)
+          .getStatus();
+    } catch (ServiceException e) {
+      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+    }
+  }
 }
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import java.io.IOException;
+
+/**
+ * Generic methods for managing Router.
+ */
+public interface RouterGenericManager {
+  /**
+   * Refresh superuser proxy groups mappings (used in RBF).
+   * @return true if the operation was successful.
+   * @throws IOException if operation was not successful.
+   */
+  boolean refreshSuperUserGroupsConfiguration() throws IOException;
+}
@@ -84,6 +84,7 @@ import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
 import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolServerSideTranslatorPB;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.service.AbstractService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -619,4 +620,10 @@ public class RouterAdminServer extends AbstractService
     // Let the registry handle as needed
     return RefreshRegistry.defaultRegistry().dispatch(identifier, args);
   }
+
+  @Override // RouterGenericManager
+  public boolean refreshSuperUserGroupsConfiguration() throws IOException {
+    ProxyUsers.refreshSuperUserGroupsConfiguration();
+    return true;
+  }
 }
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -77,6 +78,10 @@ public class RouterClient implements Closeable {
     return proxy;
   }
+
+  public RouterGenericManager getRouterGenericManager() {
+    return proxy;
+  }
 
   @Override
   public synchronized void close() throws IOException {
     RPC.stopProxy(proxy);
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API request for refreshing super user groups on router.
+ */
+public abstract class RefreshSuperUserGroupsConfigurationRequest {
+  public static RefreshSuperUserGroupsConfigurationRequest newInstance()
+      throws IOException {
+    return StateStoreSerializer
+        .newRecord(RefreshSuperUserGroupsConfigurationRequest.class);
+  }
+}
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API response for refreshing super user groups on router.
+ */
+public abstract class RefreshSuperUserGroupsConfigurationResponse {
+
+  public static RefreshSuperUserGroupsConfigurationResponse newInstance()
+      throws IOException {
+    return StateStoreSerializer.
+        newRecord(RefreshSuperUserGroupsConfigurationResponse.class);
+  }
+
+  public static RefreshSuperUserGroupsConfigurationResponse
+      newInstance(boolean status) throws IOException {
+    RefreshSuperUserGroupsConfigurationResponse response = newInstance();
+    response.setStatus(status);
+    return response;
+  }
+
+  @Public
+  @Unstable
+  public abstract boolean getStatus();
+
+  @Public
+  @Unstable
+  public abstract void setStatus(boolean result);
+}
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import com.google.protobuf.Message;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationRequestProtoOrBuilder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationRequestProto.Builder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshSuperUserGroupsConfigurationRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import java.io.IOException;
+
+/**
+ * Protobuf implementation of the state store API object
+ * RefreshSuperUserGroupsConfigurationRequest.
+ */
+public class RefreshSuperUserGroupsConfigurationRequestPBImpl
+    extends RefreshSuperUserGroupsConfigurationRequest
+    implements PBRecord {
+
+  private FederationProtocolPBTranslator<
+      RefreshSuperUserGroupsConfigurationRequestProto, Builder,
+      RefreshSuperUserGroupsConfigurationRequestProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              RefreshSuperUserGroupsConfigurationRequestProto.class);
+
+  public RefreshSuperUserGroupsConfigurationRequestPBImpl() {
+  }
+
+  public RefreshSuperUserGroupsConfigurationRequestPBImpl(
+      RefreshSuperUserGroupsConfigurationRequestProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public RefreshSuperUserGroupsConfigurationRequestProto getProto() {
+    this.translator.getBuilder();
+    return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+}
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import com.google.protobuf.Message;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationResponseProtoOrBuilder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationResponseProto.Builder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshSuperUserGroupsConfigurationResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import java.io.IOException;
+
+/**
+ * Protobuf implementation of the state store API object
+ * RefreshSuperUserGroupsConfigurationResponse.
+ */
+public class RefreshSuperUserGroupsConfigurationResponsePBImpl
+    extends RefreshSuperUserGroupsConfigurationResponse
+    implements PBRecord {
+
+  private FederationProtocolPBTranslator<
+      RefreshSuperUserGroupsConfigurationResponseProto,
+      Builder,
+      RefreshSuperUserGroupsConfigurationResponseProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              RefreshSuperUserGroupsConfigurationResponseProto.class);
+
+  public RefreshSuperUserGroupsConfigurationResponsePBImpl() {
+  }
+
+  public RefreshSuperUserGroupsConfigurationResponsePBImpl(
+      RefreshSuperUserGroupsConfigurationResponseProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public RefreshSuperUserGroupsConfigurationResponseProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public boolean getStatus() {
+    return this.translator.getProtoOrBuilder().getStatus();
+  }
+
+  @Override
+  public void setStatus(boolean result) {
+    this.translator.getBuilder().setStatus(result);
+  }
+}
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.RouterGenericManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
 import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
@@ -127,7 +128,8 @@ public class RouterAdmin extends Configured implements Tool {
         {"-add", "-update", "-rm", "-ls", "-getDestination",
             "-setQuota", "-clrQuota",
             "-safemode", "-nameservice", "-getDisabledNameservices",
-            "-refresh", "-refreshRouterArgs"};
+            "-refresh", "-refreshRouterArgs",
+            "-refreshSuperUserGroupsConfiguration"};
     StringBuilder usage = new StringBuilder();
     usage.append("Usage: hdfs dfsrouteradmin :\n");
     for (int i = 0; i < commands.length; i++) {
@@ -170,6 +172,8 @@ public class RouterAdmin extends Configured implements Tool {
       return "\t[-refresh]";
     } else if (cmd.equals("-refreshRouterArgs")) {
       return "\t[-refreshRouterArgs <host:ipc_port> <key> [arg1..argn]]";
+    } else if (cmd.equals("-refreshSuperUserGroupsConfiguration")) {
+      return "\t[-refreshSuperUserGroupsConfiguration]";
     }
     return getUsage(null);
   }
@@ -203,6 +207,10 @@ public class RouterAdmin extends Configured implements Tool {
       if (arg.length > 1) {
         throw new IllegalArgumentException("No arguments allowed");
       }
+    } else if (arg[0].equals("-refreshSuperUserGroupsConfiguration")) {
+      if (arg.length > 1) {
+        throw new IllegalArgumentException("No arguments allowed");
+      }
     }
   }
 
@@ -348,6 +356,8 @@ public class RouterAdmin extends Configured implements Tool {
         refresh(address);
       } else if ("-refreshRouterArgs".equals(cmd)) {
         exitCode = genericRefresh(argv, i);
+      } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
+        exitCode = refreshSuperUserGroupsConfiguration();
       } else {
         throw new IllegalArgumentException("Unknown Command: " + cmd);
       }
@@ -387,6 +397,25 @@ public class RouterAdmin extends Configured implements Tool {
     return exitCode;
   }
+
+  /**
+   * Refresh superuser proxy groups mappings on Router.
+   *
+   * @throws IOException if the operation was not successful.
+   */
+  private int refreshSuperUserGroupsConfiguration()
+      throws IOException {
+    RouterGenericManager proxy = client.getRouterGenericManager();
+    String address = getConf().getTrimmed(
+        RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
+        RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
+    if (proxy.refreshSuperUserGroupsConfiguration()) {
+      System.out.println(
+          "Successfully updated superuser proxy groups on router " + address);
+      return 0;
+    }
+    return -1;
+  }
 
   private void refresh(String address) throws IOException {
     if (refreshRouterCache()) {
       System.out.println(
@@ -242,6 +242,13 @@ message RefreshMountTableEntriesResponseProto {
   optional bool result = 1;
 }
 
+message RefreshSuperUserGroupsConfigurationRequestProto {
+}
+
+message RefreshSuperUserGroupsConfigurationResponseProto {
+  optional bool status = 1;
+}
+
 /////////////////////////////////////////////////
 // Route State
 /////////////////////////////////////////////////
@@ -84,4 +84,9 @@ service RouterAdminProtocolService {
    * Get the destination of a file/directory in the federation.
    */
   rpc getDestination(GetDestinationRequestProto) returns (GetDestinationResponseProto);
+
+  /**
+   * Refresh superuser proxy groups mappings on Router.
+   */
+  rpc refreshSuperUserGroupsConfiguration(RefreshSuperUserGroupsConfigurationRequestProto) returns (RefreshSuperUserGroupsConfigurationResponseProto);
 }
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.URL;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test refreshSuperUserGroupsConfiguration on Routers.
+ * Notice that this test is intended to test
+ * {@link RouterAdminServer#refreshSuperUserGroupsConfiguration}
+ * which invoked by {@link RouterAdmin}
+ */
+public class TestRouterRefreshSuperUserGroupsConfiguration {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestRouterRefreshSuperUserGroupsConfiguration.class);
+
+  private MiniRouterDFSCluster cluster;
+  private static final String ROUTER_NS = "rbfns";
+  private static final String HDFS_SCHEMA = "hdfs://";
+  private static final String LOOPBACK_ADDRESS = "127.0.0.1";
+
+  private String tempResource = null;
+  @Before
+  public void setUpCluster() throws Exception {
+    Configuration conf = new RouterConfigBuilder()
+        .rpc()
+        .admin()
+        .build();
+    cluster = new MiniRouterDFSCluster(false, 1);
+    cluster.addRouterOverrides(conf);
+    cluster.startRouters();
+  }
+
+  @After
+  public void tearDown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+
+    if (tempResource != null) {
+      File f = new File(tempResource);
+      f.delete();
+      tempResource = null;
+    }
+  }
+
+  private Configuration initializeClientConfig() throws Exception {
+
+    Configuration conf = new Configuration(false);
+    Router router = cluster.getRouters().get(0).getRouter();
+
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns0,ns1,"+ ROUTER_NS);
+    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "."+ ROUTER_NS, "r1");
+    conf.set(RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
+        LOOPBACK_ADDRESS + ":" + router.getAdminServerAddress().getPort());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY
+            + "." + ROUTER_NS + ".r1",
+        LOOPBACK_ADDRESS + ":" + router.getRpcServerAddress().getPort());
+    conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
+            + "." + ROUTER_NS,
+        ConfiguredFailoverProxyProvider.class.getCanonicalName());
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        HDFS_SCHEMA + ROUTER_NS);
+    conf.setBoolean("dfs.client.failover.random.order", false);
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, HDFS_SCHEMA + ROUTER_NS);
+
+    return conf;
+  }
+
+  @Test
+  public void testRefreshSuperUserGroupsConfiguration()
+      throws Exception {
+    Configuration conf = initializeClientConfig();
+    testRefreshSuperUserGroupsConfigurationInternal(conf);
+  }
+
+  private void testRefreshSuperUserGroupsConfigurationInternal(
+      Configuration conf) throws Exception {
+
+    UserGroupInformation ugi = mock(UserGroupInformation.class);
+    UserGroupInformation impersonator = mock(UserGroupInformation.class);
+
+    // Setting for impersonator
+    when(impersonator.getShortUserName()).thenReturn("impersonator");
+    when(impersonator.getUserName()).thenReturn("impersonator");
+
+    // Setting for victim
+    when(ugi.getRealUser()).thenReturn(impersonator);
+    when(ugi.getUserName()).thenReturn("victim");
+    when(ugi.getGroups()).thenReturn(Arrays.asList("groupVictim"));
+
+    // Exception should be thrown before applying config
+    LambdaTestUtils.intercept(
+        AuthorizationException.class,
+        "User: impersonator is not allowed to impersonate victim",
+        () -> ProxyUsers.authorize(ugi, LOOPBACK_ADDRESS));
+
+    // refresh will look at configuration on the server side
+    // add additional resource with the new value
+    // so the server side will pick it up
+    String tfile = "testRouterRefreshSuperUserGroupsConfiguration_rsrc.xml";
+    ArrayList<String> keys = new ArrayList<>(
+        Arrays.asList(
+            "hadoop.proxyuser.impersonator.groups",
+            "hadoop.proxyuser.impersonator.hosts"));
+    ArrayList<String> values = new ArrayList<>(
+        Arrays.asList(
+            "groupVictim",
+            LOOPBACK_ADDRESS));
+    tempResource = addFileBasedConfigResource(tfile, keys, values);
+    Configuration.addDefaultResource(tfile);
+
+    // Mimic CLI Access
+    RouterAdmin routerAdmin = new RouterAdmin(conf);
+    int clientRes =
+        routerAdmin.run(new String[]{"-refreshSuperUserGroupsConfiguration"});
+
+    assertEquals("CLI command was not successful", 0, clientRes);
+    ProxyUsers.authorize(ugi, LOOPBACK_ADDRESS);
+  }
+
+  private static String addFileBasedConfigResource(String configFileName,
+      ArrayList<String> keyArray,
+      ArrayList<String> valueArray)
+      throws IOException {
+
+    if (keyArray.size() != valueArray.size()) {
+      throw new IOException("keyArray and valueArray should be equal in size");
+    }
+
+    URL url = new Configuration().getResource("hdfs-site.xml");
+    Path dir = new Path(URLDecoder.decode(url.getPath(), "UTF-8")).getParent();
+    String tmp = dir.toString() + "/" + configFileName;
+
+    StringBuilder configItems = new StringBuilder();
+    configItems.append("<configuration>");
+    for (int i = 0; i < keyArray.size(); i++) {
+      configItems
+          .append("<property>")
+          .append("<name>").append(keyArray.get(i)).append("</name>")
+          .append("<value>").append(valueArray.get(i)).append("</value>")
+          .append("</property>");
+    }
+    configItems.append("</configuration>");
+
+    PrintWriter writer = new PrintWriter(new FileOutputStream(tmp));
+    writer.println(configItems.toString());
+    writer.close();
+    return tmp;
+  }
+}
@@ -444,6 +444,7 @@ Usage:
           [-getDisabledNameservices]
           [-refresh]
           [-refreshRouterArgs <host:ipc_port> <key> [arg1..argn]]
+          [-refreshSuperUserGroupsConfiguration]
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
@@ -459,6 +460,7 @@ Usage:
 | `-getDisabledNameservices` | Get the name services that are disabled in the federation. |
 | `-refresh` | Update mount table cache of the connected router. |
 | `refreshRouterArgs` \<host:ipc\_port\> \<key\> [arg1..argn] | To trigger a runtime-refresh of the resource specified by \<key\> on \<host:ipc\_port\>. For example, to enable white list checking, we just need to send a refresh command other than restart the router server. |
+| `-refreshSuperUserGroupsConfiguration` | Refresh superuser proxy groups mappings on Router. |
 
 The commands for managing Router-based federation. See [Mount table management](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Mount_table_management) for more info.