HDFS-6780. Batch the encryption zones listing API. (wang)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1615189 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Andrew Wang 2014-08-01 18:47:01 +00:00
parent 3d9e39e51f
commit 70c99278a9
20 changed files with 342 additions and 111 deletions

View File

@ -72,6 +72,8 @@ fs-encryption (Unreleased)
HDFS-6692. Add more HDFS encryption tests. (wang)
HDFS-6780. Batch the encryption zones listing API. (wang)
OPTIMIZATIONS
BUG FIXES

View File

@ -150,6 +150,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -2857,13 +2858,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
public List<EncryptionZone> listEncryptionZones() throws IOException {
public RemoteIterator<EncryptionZone> listEncryptionZones()
throws IOException {
checkOpen();
try {
return namenode.listEncryptionZones();
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class);
}
return new EncryptionZoneIterator(namenode);
}
public void setXAttr(String src, String name, byte[] value,

View File

@ -566,6 +566,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
public static final int DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
// Journal-node related configs. These are read on the JN side.
public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";

View File

@ -1805,7 +1805,8 @@ public class DistributedFileSystem extends FileSystem {
}
/* HDFS only */
public List<EncryptionZone> listEncryptionZones() throws IOException {
public RemoteIterator<EncryptionZone> listEncryptionZones()
throws IOException {
return dfs.listEncryptionZones();
}

View File

@ -248,16 +248,17 @@ public class HdfsAdmin {
}
/**
* Return a list of all {@link EncryptionZone}s in the HDFS hierarchy which
* are visible to the caller. If the caller is an HDFS superuser,
* then the key name of each encryption zone will also be provided.
*
* @throws IOException if there was a general IO exception
*
* @return List<EncryptionZone> the list of Encryption Zones that the caller has
* access to.
* Returns a RemoteIterator which can be used to list the encryption zones
* in HDFS. For large numbers of encryption zones, the iterator will fetch
* the list of zones in a number of small batches.
* <p/>
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of the entire list of encryption zones.
* <p/>
* This method can only be called by HDFS superusers.
*/
public List<EncryptionZone> listEncryptionZones() throws IOException {
public RemoteIterator<EncryptionZone> listEncryptionZones()
throws IOException {
return dfs.listEncryptionZones();
}
}

View File

@ -1275,16 +1275,15 @@ public interface ClientProtocol {
throws IOException;
/**
* Return a list of all {@EncryptionZone}s in the HDFS hierarchy which are
* visible to the caller. If the caller is the HDFS admin, then the returned
* EncryptionZone instances will have the key id field filled in. If the
* caller is not the HDFS admin, then the EncryptionZone instances will only
* have the path field filled in and only those zones that are visible to the
* user are returned.
* Used to implement cursor-based batched listing of {@link EncryptionZoneWithId}s.
*
* @param prevId ID of the last item in the previous batch. If there is no
* previous batch, a negative value can be used.
* @return Batch of encryption zones.
*/
@Idempotent
public List<EncryptionZone> listEncryptionZones()
throws IOException;
public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
long prevId) throws IOException;
/**
* Set xattr of a file or directory.

View File

@ -0,0 +1,51 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.RemoteIterator;
/**
 * EncryptionZoneIterator is a remote iterator that iterates over encryption
 * zones. It supports retrying in case of namenode failover.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class EncryptionZoneIterator implements RemoteIterator<EncryptionZone> {

  /** Underlying batched iterator over {@code EncryptionZoneWithId} entries. */
  private final EncryptionZoneWithIdIterator withIdIterator;

  /**
   * @param namenode RPC proxy used to fetch batches of encryption zones
   */
  public EncryptionZoneIterator(ClientProtocol namenode) {
    this.withIdIterator = new EncryptionZoneWithIdIterator(namenode);
  }

  @Override
  public boolean hasNext() throws IOException {
    return withIdIterator.hasNext();
  }

  @Override
  public EncryptionZone next() throws IOException {
    // Strip the internal id before handing the zone back to the caller.
    return withIdIterator.next().toEncryptionZone();
  }
}

View File

@ -0,0 +1,64 @@
package org.apache.hadoop.hdfs.protocol;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * Internal class similar to an {@link EncryptionZone} which also holds a
 * unique id. Used to implement batched listing of encryption zones.
 */
@InterfaceAudience.Private
public class EncryptionZoneWithId extends EncryptionZone {

  // Unique id for this zone; serves as the cursor for batched listing.
  final long id;

  public EncryptionZoneWithId(String path, String keyName, long id) {
    super(path, keyName);
    this.id = id;
  }

  public long getId() {
    return id;
  }

  /** Converts to a plain {@link EncryptionZone}, dropping the id. */
  EncryptionZone toEncryptionZone() {
    return new EncryptionZone(getPath(), getKeyName());
  }

  @Override
  public int hashCode() {
    // Fold the superclass hash and the id with the same (17, 29) scheme
    // as before, so hash values are unchanged.
    return new HashCodeBuilder(17, 29)
        .append(super.hashCode())
        .append(id)
        .toHashCode();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass() || !super.equals(o)) {
      return false;
    }
    return id == ((EncryptionZoneWithId) o).id;
  }

  @Override
  public String toString() {
    // Renders exactly as before: id first, then the superclass fields.
    final StringBuilder sb = new StringBuilder("EncryptionZoneWithId [");
    sb.append("id=").append(id)
        .append(", ").append(super.toString())
        .append(']');
    return sb.toString();
  }
}

View File

@ -0,0 +1,53 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BatchedRemoteIterator;
/**
 * Used on the client-side to iterate over the list of encryption zones
 * stored on the namenode.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class EncryptionZoneWithIdIterator
    extends BatchedRemoteIterator<Long, EncryptionZoneWithId> {

  /** RPC proxy used to fetch each batch from the namenode. */
  private final ClientProtocol namenode;

  EncryptionZoneWithIdIterator(ClientProtocol namenode) {
    super(0L); // start the cursor before the first zone id
    this.namenode = namenode;
  }

  @Override
  public BatchedEntries<EncryptionZoneWithId> makeRequest(Long prevId)
      throws IOException {
    // Each call fetches the batch of zones whose ids follow prevId.
    return namenode.listEncryptionZones(prevId);
  }

  @Override
  public Long elementToPrevKey(EncryptionZoneWithId entry) {
    // The entry's id becomes the cursor for the next request.
    return entry.getId();
  }
}

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@ -1317,7 +1318,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
RpcController controller, ListEncryptionZonesRequestProto req)
throws ServiceException {
try {
return PBHelper.convertListEZResponse(server.listEncryptionZones());
BatchedEntries<EncryptionZoneWithId> entries = server
.listEncryptionZones(req.getId());
ListEncryptionZonesResponseProto.Builder builder =
ListEncryptionZonesResponseProto.newBuilder();
builder.setHasMore(entries.hasMore());
for (int i=0; i<entries.size(); i++) {
builder.addZones(PBHelper.convert(entries.get(i)));
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}

View File

@ -24,6 +24,7 @@ import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CipherSuite;
@ -52,7 +53,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@ -146,6 +147,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
@ -174,6 +176,11 @@ import org.apache.hadoop.security.token.Token;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
.EncryptionZoneWithIdProto;
/**
* This class forwards NN's ClientProtocol calls as RPC calls to the NN server
* while translating from the parameter types used in ClientProtocol to the
@ -1317,11 +1324,22 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
public List<EncryptionZone> listEncryptionZones() throws IOException {
public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(long id)
throws IOException {
final ListEncryptionZonesRequestProto req =
ListEncryptionZonesRequestProto.newBuilder().build();
ListEncryptionZonesRequestProto.newBuilder()
.setId(id)
.build();
try {
return PBHelper.convert(rpcProxy.listEncryptionZones(null, req));
EncryptionZonesProtos.ListEncryptionZonesResponseProto response =
rpcProxy.listEncryptionZones(null, req);
List<EncryptionZoneWithId> elements =
Lists.newArrayListWithCapacity(response.getZonesCount());
for (EncryptionZoneWithIdProto p : response.getZonesList()) {
elements.add(PBHelper.convert(p));
}
return new BatchedListEntries<EncryptionZoneWithId>(elements,
response.getHasMore());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs.protocolPB;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
.EncryptionZoneWithIdProto;
import java.io.EOFException;
import java.io.IOException;
@ -59,7 +61,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.FsAclPermission;
@ -111,8 +113,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCom
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@ -2265,45 +2265,6 @@ public class PBHelper {
return xAttrs;
}
public static List<EncryptionZone> convert(ListEncryptionZonesResponseProto a) {
final List<EncryptionZoneProto> ezs = a.getPathsAndKeysList();
return convertEZ(ezs);
}
public static ListEncryptionZonesResponseProto convertListEZResponse(
List<EncryptionZone> ezs) {
final ListEncryptionZonesResponseProto.Builder builder =
ListEncryptionZonesResponseProto.newBuilder();
builder.addAllPathsAndKeys(convertEZProto(ezs));
return builder.build();
}
public static List<EncryptionZoneProto> convertEZProto(
List<EncryptionZone> ezs) {
final ArrayList<EncryptionZoneProto> ret =
Lists.newArrayListWithCapacity(ezs.size());
for (EncryptionZone a : ezs) {
final EncryptionZoneProto.Builder builder =
EncryptionZoneProto.newBuilder();
builder.setPath(a.getPath());
builder.setKeyName(a.getKeyName());
ret.add(builder.build());
}
return ret;
}
public static List<EncryptionZone> convertEZ(
List<EncryptionZoneProto> ezs) {
final ArrayList<EncryptionZone> ret =
Lists.newArrayListWithCapacity(ezs.size());
for (EncryptionZoneProto a : ezs) {
final EncryptionZone ez =
new EncryptionZone(a.getPath(), a.getKeyName());
ret.add(ez);
}
return ret;
}
public static List<XAttr> convert(GetXAttrsResponseProto a) {
List<XAttrProto> xAttrs = a.getXAttrsList();
return convertXAttrs(xAttrs);
@ -2334,6 +2295,18 @@ public class PBHelper {
return builder.build();
}
public static EncryptionZoneWithIdProto convert(EncryptionZoneWithId zone) {
return EncryptionZoneWithIdProto.newBuilder()
.setId(zone.getId())
.setKeyName(zone.getKeyName())
.setPath(zone.getPath()).build();
}
public static EncryptionZoneWithId convert(EncryptionZoneWithIdProto proto) {
return new EncryptionZoneWithId(proto.getPath(), proto.getKeyName(),
proto.getId());
}
public static ShortCircuitShmSlotProto convert(SlotId slotId) {
return ShortCircuitShmSlotProto.newBuilder().
setShmId(convert(slotId.getShmId())).

View File

@ -2,22 +2,25 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants
.CRYPTO_XATTR_ENCRYPTION_ZONE;
@ -57,17 +60,26 @@ public class EncryptionZoneManager {
}
private final Map<Long, EncryptionZoneInt> encryptionZones;
private final TreeMap<Long, EncryptionZoneInt> encryptionZones;
private final FSDirectory dir;
private final int maxListEncryptionZonesResponses;
/**
* Construct a new EncryptionZoneManager.
*
* @param dir Enclosing FSDirectory
*/
public EncryptionZoneManager(FSDirectory dir) {
public EncryptionZoneManager(FSDirectory dir, Configuration conf) {
this.dir = dir;
encryptionZones = new HashMap<Long, EncryptionZoneInt>();
encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
maxListEncryptionZonesResponses = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT
);
Preconditions.checkArgument(maxListEncryptionZonesResponses >= 0,
DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES + " " +
"must be a positive integer."
);
}
/**
@ -236,17 +248,30 @@ public class EncryptionZoneManager {
}
/**
* Return the current list of encryption zones.
* Cursor-based listing of encryption zones.
* <p/>
* Called while holding the FSDirectory lock.
*/
List<EncryptionZone> listEncryptionZones() throws IOException {
BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
throws IOException {
assert dir.hasReadLock();
final List<EncryptionZone> ret =
Lists.newArrayListWithExpectedSize(encryptionZones.size());
for (EncryptionZoneInt ezi : encryptionZones.values()) {
ret.add(new EncryptionZone(getFullPathName(ezi), ezi.getKeyName()));
NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap
(prevId, false);
final int numResponses = Math.min(maxListEncryptionZonesResponses,
tailMap.size());
final List<EncryptionZoneWithId> zones =
Lists.newArrayListWithExpectedSize(numResponses);
int count = 0;
for (EncryptionZoneInt ezi : tailMap.values()) {
zones.add(new EncryptionZoneWithId(getFullPathName(ezi),
ezi.getKeyName(), ezi.getINodeId()));
count++;
if (count >= numResponses) {
break;
}
return ret;
}
final boolean hasMore = (numResponses < tailMap.size());
return new BatchedListEntries<EncryptionZoneWithId>(zones, hasMore);
}
}

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
import static org.apache.hadoop.util.Time.now;
@ -58,7 +59,7 @@ import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.FsAclPermission;
@ -227,7 +228,7 @@ public class FSDirectory implements Closeable {
nameCache = new NameCache<ByteArray>(threshold);
namesystem = ns;
ezManager = new EncryptionZoneManager(this);
ezManager = new EncryptionZoneManager(this, conf);
}
private FSNamesystem getFSNamesystem() {
@ -2646,10 +2647,11 @@ public class FSDirectory implements Closeable {
}
}
List<EncryptionZone> listEncryptionZones() throws IOException {
BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
throws IOException {
readLock();
try {
return ezManager.listEncryptionZones();
return ezManager.listEncryptionZones(prevId);
} finally {
readUnlock();
}

View File

@ -183,6 +183,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@ -8559,7 +8560,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat);
}
List<EncryptionZone> listEncryptionZones() throws IOException {
BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
throws IOException {
boolean success = false;
checkSuperuserPrivilege();
checkOperation(OperationCategory.READ);
@ -8567,7 +8569,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
try {
checkSuperuserPrivilege();
checkOperation(OperationCategory.READ);
final List<EncryptionZone> ret = dir.listEncryptionZones();
final BatchedListEntries<EncryptionZoneWithId> ret =
dir.listEncryptionZones(prevId);
success = true;
return ret;
} finally {

View File

@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -1432,8 +1432,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override
public List<EncryptionZone> listEncryptionZones() throws IOException {
return namesystem.listEncryptionZones();
public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
long prevId) throws IOException {
return namesystem.listEncryptionZones(prevId);
}
@Override

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.tools.TableListing;
@ -201,8 +202,9 @@ public class CryptoAdmin extends Configured implements Tool {
final TableListing listing = new TableListing.Builder()
.addField("").addField("", true)
.wrapWidth(MAX_LINE_WIDTH).hideHeaders().build();
final List<EncryptionZone> ezs = dfs.listEncryptionZones();
for (EncryptionZone ez : ezs) {
final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
while (it.hasNext()) {
EncryptionZone ez = it.next();
listing.addRow(ez.getPath(), ez.getKeyName());
}
System.out.println(listing.toString());

View File

@ -42,13 +42,16 @@ message CreateEncryptionZoneResponseProto {
}
message ListEncryptionZonesRequestProto {
required int64 id = 1;
}
message EncryptionZoneProto {
message EncryptionZoneWithIdProto {
required string path = 1;
required string keyName = 2;
required int64 id = 3;
}
message ListEncryptionZonesResponseProto {
repeated EncryptionZoneProto pathsAndKeys = 1;
repeated EncryptionZoneWithIdProto zones = 1;
required bool hasMore = 2;
}

View File

@ -2052,4 +2052,13 @@
</description>
</property>
<property>
<name>dfs.namenode.list.encryption.zones.num.responses</name>
<value>100</value>
<description>When listing encryption zones, the maximum number of zones
that will be returned in a batch. Fetching the list incrementally in
batches improves namenode performance.
</description>
</property>
</configuration>

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@ -90,6 +91,9 @@ public class TestEncryptionZones {
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
);
// Lower the batch size for testing
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
2);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
fs = cluster.getFileSystem();
@ -114,9 +118,13 @@ public class TestEncryptionZones {
}
public void assertNumZones(final int numZones) throws IOException {
final List<EncryptionZone> zones = dfsAdmin.listEncryptionZones();
assertEquals("Unexpected number of encryption zones!", numZones,
zones.size());
RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
int count = 0;
while (it.hasNext()) {
count++;
it.next();
}
assertEquals("Unexpected number of encryption zones!", numZones, count);
}
/**
@ -126,9 +134,10 @@ public class TestEncryptionZones {
* @throws IOException if a matching zone could not be found
*/
public void assertZonePresent(String keyName, String path) throws IOException {
final List<EncryptionZone> zones = dfsAdmin.listEncryptionZones();
final RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
boolean match = false;
for (EncryptionZone zone : zones) {
while (it.hasNext()) {
EncryptionZone zone = it.next();
boolean matchKey = (keyName == null);
boolean matchPath = (path == null);
if (keyName != null && zone.getKeyName().equals(keyName)) {
@ -282,6 +291,16 @@ public class TestEncryptionZones {
dfsAdmin.createEncryptionZone(deepZone, TEST_KEY);
assertNumZones(++numZones);
assertZonePresent(null, deepZone.toString());
// Create and list some zones to test batching of listEZ
for (int i=1; i<6; i++) {
final Path zonePath = new Path("/listZone" + i);
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
numZones++;
assertNumZones(numZones);
assertZonePresent(null, zonePath.toString());
}
}
/**
@ -369,9 +388,8 @@ public class TestEncryptionZones {
// Read them back in and compare byte-by-byte
verifyFilesEqual(fs, baseFile, encFile1, len);
// Roll the key of the encryption zone
List<EncryptionZone> zones = dfsAdmin.listEncryptionZones();
assertEquals("Expected 1 EZ", 1, zones.size());
String keyName = zones.get(0).getKeyName();
assertNumZones(1);
String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
cluster.getNamesystem().getProvider().rollNewVersion(keyName);
// Read them back in and compare byte-by-byte
verifyFilesEqual(fs, baseFile, encFile1, len);
@ -457,14 +475,12 @@ public class TestEncryptionZones {
@Test(timeout = 120000)
public void testCreateEZWithNoProvider() throws Exception {
// Unset the key provider and make sure EZ ops don't work
final Configuration clusterConf = cluster.getConfiguration(0);
clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "");
cluster.restartNameNode(true);
cluster.waitActive();
/* Test failure of create EZ on a directory that doesn't exist. */
final Path zone1 = new Path("/zone1");
/* Normal creation of an EZ */
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
try {
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
@ -476,8 +492,7 @@ public class TestEncryptionZones {
JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
);
// Try listing EZs as well
List<EncryptionZone> zones = dfsAdmin.listEncryptionZones();
assertEquals("Expected no zones", 0, zones.size());
assertNumZones(0);
}
private class MyInjector extends EncryptionFaultInjector {