HDDS-2161. Create RepeatedKeyInfo structure to be saved in deletedTable

Signed-off-by: Anu Engineer <aengineer@apache.org>
dchitlangia 2019-09-20 18:06:30 -04:00 committed by Anu Engineer
parent 3d78b1223d
commit 3fd3d746fc
16 changed files with 324 additions and 116 deletions


@@ -26,9 +26,11 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.VolumeList;
import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -251,7 +253,7 @@ public interface OMMetadataManager {
*
* @return Deleted Table.
*/
Table<String, OmKeyInfo> getDeletedTable();
Table<String, RepeatedOmKeyInfo> getDeletedTable();
/**
* Gets the OpenKeyTable.


@@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.om.codec;
import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.RepeatedKeyInfo;
import java.io.IOException;
/**
* Codec to encode RepeatedOmKeyInfo as byte array.
*/
public class RepeatedOmKeyInfoCodec implements Codec<RepeatedOmKeyInfo> {
@Override
public byte[] toPersistedFormat(RepeatedOmKeyInfo object)
throws IOException {
Preconditions.checkNotNull(object,
"Null object can't be converted to byte array.");
return object.getProto().toByteArray();
}
@Override
public RepeatedOmKeyInfo fromPersistedFormat(byte[] rawData)
throws IOException {
Preconditions.checkNotNull(rawData,
"Null byte array can't converted to real object.");
try {
return RepeatedOmKeyInfo.getFromProto(RepeatedKeyInfo.parseFrom(rawData));
} catch (InvalidProtocolBufferException e) {
throw new IllegalArgumentException(
"Can't encode the the raw data from the byte array", e);
}
}
}
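
For illustration, here is a minimal round-trip through this codec. This is a sketch, not code from the commit: it assumes an OmKeyInfo instance keyInfo built elsewhere (for example, from the key table or a test builder), with imports as in the file above plus OmKeyInfo.

// Sketch: encode a single-entry RepeatedOmKeyInfo to proto bytes and decode
// it back; the decoded list should match the original.
static RepeatedOmKeyInfo roundTrip(OmKeyInfo keyInfo) throws IOException {
  RepeatedOmKeyInfoCodec codec = new RepeatedOmKeyInfoCodec();
  byte[] persisted = codec.toPersistedFormat(new RepeatedOmKeyInfo(keyInfo));
  return codec.fromPersistedFormat(persisted);
}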


@@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.om.helpers;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.RepeatedKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.KeyInfo;
/**
* Args for deleted keys. This is written to the om metadata deletedTable.
* Once a key is deleted, it is moved to the om metadata deletedTable. Having a
* {label: List<OMKeyInfo>} ensures that if users create and delete keys with
* the exact same URI multiple times, all the delete instances are bundled
* under the same key name. This is useful for GDPR compliance, where an
* admin wants to confirm from the deletedTable metadata that a given key
* has been deleted.
*/
public class RepeatedOmKeyInfo {
private List<OmKeyInfo> omKeyInfoList;
public RepeatedOmKeyInfo(List<OmKeyInfo> omKeyInfos) {
this.omKeyInfoList = omKeyInfos;
}
public RepeatedOmKeyInfo(OmKeyInfo omKeyInfos) {
this.omKeyInfoList = new ArrayList<>();
this.omKeyInfoList.add(omKeyInfos);
}
public void addOmKeyInfo(OmKeyInfo info) {
this.omKeyInfoList.add(info);
}
public List<OmKeyInfo> getOmKeyInfoList() {
return omKeyInfoList;
}
public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo
repeatedKeyInfo) {
List<OmKeyInfo> list = new ArrayList<>();
for(KeyInfo k : repeatedKeyInfo.getKeyInfoList()) {
list.add(OmKeyInfo.getFromProtobuf(k));
}
return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build();
}
public RepeatedKeyInfo getProto() {
List<KeyInfo> list = new ArrayList<>();
for(OmKeyInfo k : omKeyInfoList) {
list.add(k.getProtobuf());
}
RepeatedKeyInfo.Builder builder = RepeatedKeyInfo.newBuilder()
.addAllKeyInfo(list);
return builder.build();
}
/**
* Builder of RepeatedOmKeyInfo.
*/
public static class Builder {
private List<OmKeyInfo> omKeyInfos;
public Builder(){}
public Builder setOmKeyInfos(List<OmKeyInfo> infoList) {
this.omKeyInfos = infoList;
return this;
}
public RepeatedOmKeyInfo build() {
return new RepeatedOmKeyInfo(omKeyInfos);
}
}
}
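
The read-modify-write pattern this structure enables against deletedTable is sketched below; it mirrors the call sites later in this commit and assumes metadataManager, objectKey, and keyInfo are in scope.

// Insert-or-append: bundle every delete of the same key name into one row.
RepeatedOmKeyInfo repeatedOmKeyInfo =
    metadataManager.getDeletedTable().get(objectKey);
if (repeatedOmKeyInfo == null) {
  // First delete of this key name: start a new list.
  repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
} else {
  // This key name was deleted before: append the new delete instance.
  repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
}
metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);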


@@ -687,6 +687,10 @@ message KeyInfo {
repeated OzoneAclInfo acls = 13;
}
message RepeatedKeyInfo {
repeated KeyInfo keyInfo = 1;
}
message OzoneFileStatusProto {
required hadoop.fs.FileStatusProto status = 1;
}
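
A sketch of how the RepeatedKeyInfo message above is populated from existing KeyInfo protos; the addKeyInfo/getKeyInfoList accessors follow protobuf's generated-code conventions for a repeated field, and the two KeyInfo variables are assumed.

// Bundle two delete instances of the same key name into one proto message.
RepeatedKeyInfo repeated = RepeatedKeyInfo.newBuilder()
    .addKeyInfo(firstDeleteKeyInfo)
    .addKeyInfo(secondDeleteKeyInfo)
    .build();
// repeated.getKeyInfoList() returns both entries in insertion order.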


@@ -85,6 +85,7 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -781,9 +782,17 @@ public class KeyManagerImpl implements KeyManager {
return;
}
}
metadataManager.getStore().move(objectKey,
metadataManager.getKeyTable(),
metadataManager.getDeletedTable());
// Check if a key with the same keyName already exists in deletedTable and
// then insert or update the entry accordingly.
RepeatedOmKeyInfo repeatedOmKeyInfo =
metadataManager.getDeletedTable().get(objectKey);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
}
metadataManager.getKeyTable().delete(objectKey);
metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
} catch (OMException ex) {
throw ex;
} catch (IOException ex) {
@@ -1003,7 +1012,14 @@
// will not be garbage collected, so move this part to the delete table
// and throw an error.
metadataManager.getDeletedTable().put(partName, keyInfo);
RepeatedOmKeyInfo repeatedOmKeyInfo =
metadataManager.getDeletedTable().get(partName);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
}
metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
throw new OMException("No such Multipart upload is with specified " +
"uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
} else {
@@ -1031,9 +1047,19 @@
// Add the new entry in to the list of part keys.
DBStore store = metadataManager.getStore();
try (BatchOperation batch = store.initBatchOperation()) {
RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
getDeletedTable().get(oldPartKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
} else {
repeatedOmKeyInfo.addOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
}
metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
metadataManager.getDeletedTable().putWithBatch(batch,
oldPartKeyInfo.getPartName(),
OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
repeatedOmKeyInfo);
metadataManager.getOpenKeyTable().deleteWithBatch(batch, openKey);
metadataManager.getMultipartInfoTable().putWithBatch(batch,
multipartKey, multipartKeyInfo);
@@ -1252,8 +1278,17 @@
PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(
partKeyInfo.getPartKeyInfo());
RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
getDeletedTable().get(partKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
}
metadataManager.getDeletedTable().putWithBatch(batch,
partKeyInfo.getPartName(), currentKeyPartInfo);
partKeyInfo.getPartName(), repeatedOmKeyInfo);
}
// Finally delete the entry from the multipart info table and open
// key table


@@ -44,6 +44,7 @@ import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec;
import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
import org.apache.hadoop.ozone.om.codec.VolumeListCodec;
@@ -57,6 +58,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
@@ -87,31 +89,31 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
* OM DB stores metadata as KV pairs in different column families.
* <p>
* OM DB Schema:
* |-------------------------------------------------------------------|
* | Column Family | VALUE |
* |-------------------------------------------------------------------|
* | userTable | user->VolumeList |
* |-------------------------------------------------------------------|
* | volumeTable | /volume->VolumeInfo |
* |-------------------------------------------------------------------|
* | bucketTable | /volume/bucket-> BucketInfo |
* |-------------------------------------------------------------------|
* | keyTable | /volumeName/bucketName/keyName->KeyInfo |
* |-------------------------------------------------------------------|
* | deletedTable | /volumeName/bucketName/keyName->KeyInfo |
* |-------------------------------------------------------------------|
* | openKey | /volumeName/bucketName/keyName/id->KeyInfo |
* |-------------------------------------------------------------------|
* | s3Table | s3BucketName -> /volumeName/bucketName |
* |-------------------------------------------------------------------|
* | s3SecretTable | s3g_access_key_id -> s3Secret |
* |-------------------------------------------------------------------|
* | dTokenTable | s3g_access_key_id -> s3Secret |
* |-------------------------------------------------------------------|
* | prefixInfoTable | prefix -> PrefixInfo |
* |-------------------------------------------------------------------|
* | multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...|
* |-------------------------------------------------------------------|
* |----------------------------------------------------------------------|
* | Column Family | VALUE |
* |----------------------------------------------------------------------|
* | userTable | user->VolumeList |
* |----------------------------------------------------------------------|
* | volumeTable | /volume->VolumeInfo |
* |----------------------------------------------------------------------|
* | bucketTable | /volume/bucket-> BucketInfo |
* |----------------------------------------------------------------------|
* | keyTable | /volumeName/bucketName/keyName->KeyInfo |
* |----------------------------------------------------------------------|
* | deletedTable | /volumeName/bucketName/keyName->RepeatedKeyInfo |
* |----------------------------------------------------------------------|
* | openKey | /volumeName/bucketName/keyName/id->KeyInfo |
* |----------------------------------------------------------------------|
* | s3Table | s3BucketName -> /volumeName/bucketName |
* |----------------------------------------------------------------------|
* | s3SecretTable | s3g_access_key_id -> s3Secret |
* |----------------------------------------------------------------------|
* | dTokenTable | s3g_access_key_id -> s3Secret |
* |----------------------------------------------------------------------|
* | prefixInfoTable | prefix -> PrefixInfo |
* |----------------------------------------------------------------------|
* | multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->... |
* |----------------------------------------------------------------------|
*/
public static final String USER_TABLE = "userTable";
@@ -192,7 +194,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
}
@Override
public Table<String, OmKeyInfo> getDeletedTable() {
public Table<String, RepeatedOmKeyInfo> getDeletedTable() {
return deletedTable;
}
@@ -261,6 +263,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
.addTable(PREFIX_TABLE)
.addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
.addCodec(OmKeyInfo.class, new OmKeyInfoCodec())
.addCodec(RepeatedOmKeyInfo.class, new RepeatedOmKeyInfoCodec())
.addCodec(OmBucketInfo.class, new OmBucketInfoCodec())
.addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec())
.addCodec(VolumeList.class, new VolumeListCodec())
@@ -296,8 +299,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class);
checkTableStatus(keyTable, KEY_TABLE);
deletedTable =
this.store.getTable(DELETED_TABLE, String.class, OmKeyInfo.class);
deletedTable = this.store.getTable(DELETED_TABLE, String.class,
RepeatedOmKeyInfo.class);
checkTableStatus(deletedTable, DELETED_TABLE);
openKeyTable =
@@ -765,25 +768,26 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
public List<BlockGroup> getPendingDeletionKeys(final int keyCount)
throws IOException {
List<BlockGroup> keyBlocksList = Lists.newArrayList();
try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter =
getDeletedTable()
.iterator()) {
try (TableIterator<String, ? extends KeyValue<String, RepeatedOmKeyInfo>>
keyIter = getDeletedTable().iterator()) {
int currentCount = 0;
while (keyIter.hasNext() && currentCount < keyCount) {
KeyValue<String, OmKeyInfo> kv = keyIter.next();
KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
if (kv != null) {
OmKeyInfo info = kv.getValue();
RepeatedOmKeyInfo infoList = kv.getValue();
// Get block keys as a list.
OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
List<BlockID> item = latest.getLocationList().stream()
.map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
.collect(Collectors.toList());
BlockGroup keyBlocks = BlockGroup.newBuilder()
.setKeyName(kv.getKey())
.addAllBlockIDs(item)
.build();
keyBlocksList.add(keyBlocks);
currentCount++;
for(OmKeyInfo info : infoList.getOmKeyInfoList()){
OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
List<BlockID> item = latest.getLocationList().stream()
.map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
.collect(Collectors.toList());
BlockGroup keyBlocks = BlockGroup.newBuilder()
.setKeyName(kv.getKey())
.addAllBlockIDs(item)
.build();
keyBlocksList.add(keyBlocks);
currentCount++;
}
}
}
}


@@ -139,13 +139,12 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
// TODO: Revisit if we need it later.
omClientResponse = new OMKeyDeleteResponse(omKeyInfo,
deleteKeyArgs.getModificationTime(),
omResponse.setDeleteKeyResponse(
DeleteKeyResponse.newBuilder()).build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMKeyDeleteResponse(null, 0,
omClientResponse = new OMKeyDeleteResponse(null,
createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {


@@ -133,14 +133,13 @@ public class S3MultipartUploadAbortRequest extends OMKeyRequest {
}
omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
keyArgs.getModificationTime(), multipartKeyInfo,
multipartKeyInfo,
omResponse.setAbortMultiPartUploadResponse(
MultipartUploadAbortResponse.newBuilder()).build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
keyArgs.getModificationTime(), multipartKeyInfo,
createErrorOMResponse(omResponse, exception));
multipartKeyInfo, createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(


@@ -188,13 +188,13 @@ public class S3MultipartUploadCommitPartRequest extends OMKeyRequest {
omResponse.setCommitMultiPartUploadResponse(
MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
openKey, omKeyInfo, multipartKeyInfo,
oldPartKeyInfo, omResponse.build());
} catch (IOException ex) {
exception = ex;
omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
openKey, omKeyInfo, multipartKeyInfo,
oldPartKeyInfo, createErrorOMResponse(omResponse, exception));
} finally {
if (omClientResponse != null) {


@@ -18,10 +18,10 @@
package org.apache.hadoop.ozone.om.response.key;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -35,13 +35,10 @@ import java.io.IOException;
*/
public class OMKeyDeleteResponse extends OMClientResponse {
private OmKeyInfo omKeyInfo;
private long deleteTimestamp;
public OMKeyDeleteResponse(OmKeyInfo omKeyInfo, long deletionTime,
OMResponse omResponse) {
public OMKeyDeleteResponse(OmKeyInfo omKeyInfo, OMResponse omResponse) {
super(omResponse);
this.omKeyInfo = omKeyInfo;
this.deleteTimestamp = deletionTime;
}
@Override
@@ -60,12 +57,22 @@ public class OMKeyDeleteResponse extends OMClientResponse {
if (!isKeyEmpty(omKeyInfo)) {
// If a deleted key is put in the table where a key with the same
// name already exists, then the old deleted key information would be
// lost. To differentiate between keys with same name in
// deletedTable, we add the timestamp to the key name.
String deleteKeyName = OmUtils.getDeletedKeyName(
ozoneKey, deleteTimestamp);
// lost. To avoid this, first check if a key with the same name exists.
// deletedTable in OM metadata stores <KeyName, RepeatedOmKeyInfo>.
// RepeatedOmKeyInfo is the structure that allows us to store a list of
// OmKeyInfo instances tied to the same key name. For a keyName, if the
// RepeatedOmKeyInfo structure is null, we create a new instance; if it
// is not null, we simply add to the list and store the updated instance
// in deletedTable.
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(ozoneKey);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
}
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
deleteKeyName, omKeyInfo);
ozoneKey, repeatedOmKeyInfo);
}
}
}
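
The observable effect of this change, as a hedged JUnit-style sketch: the harness names (omMetadataManager, ozoneKey, Assert) follow the tests later in this commit, and a setup with two committed deletes of the same key name is assumed.

// After two deletes of the same key name are committed, a single
// deletedTable row should carry both OmKeyInfo instances.
RepeatedOmKeyInfo row = omMetadataManager.getDeletedTable().get(ozoneKey);
Assert.assertNotNull(row);
Assert.assertEquals(2, row.getOmKeyInfoList().size());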


@@ -18,10 +18,10 @@
package org.apache.hadoop.ozone.om.response.s3.multipart;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -40,16 +40,12 @@ import java.util.TreeMap;
public class S3MultipartUploadAbortResponse extends OMClientResponse {
private String multipartKey;
private long timeStamp;
private OmMultipartKeyInfo omMultipartKeyInfo;
public S3MultipartUploadAbortResponse(String multipartKey,
long timeStamp,
OmMultipartKeyInfo omMultipartKeyInfo,
OMResponse omResponse) {
OmMultipartKeyInfo omMultipartKeyInfo, OMResponse omResponse) {
super(omResponse);
this.multipartKey = multipartKey;
this.timeStamp = timeStamp;
this.omMultipartKeyInfo = omMultipartKeyInfo;
}
@@ -73,9 +69,18 @@ public class S3MultipartUploadAbortResponse extends OMClientResponse {
PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
OmKeyInfo currentKeyPartInfo =
OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
}
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
OmUtils.getDeletedKeyName(partKeyInfo.getPartName(), timeStamp),
currentKeyPartInfo);
partKeyInfo.getPartName(),
repeatedOmKeyInfo);
}
}


@@ -18,10 +18,10 @@
package org.apache.hadoop.ozone.om.response.s3.multipart;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -41,21 +41,19 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
private String multipartKey;
private String openKey;
private long deleteTimeStamp;
private OmKeyInfo deletePartKeyInfo;
private OmMultipartKeyInfo omMultipartKeyInfo;
private OzoneManagerProtocolProtos.PartKeyInfo oldMultipartKeyInfo;
public S3MultipartUploadCommitPartResponse(String multipartKey,
String openKey, long deleteTimeStamp,
OmKeyInfo deletePartKeyInfo, OmMultipartKeyInfo omMultipartKeyInfo,
String openKey, OmKeyInfo deletePartKeyInfo,
OmMultipartKeyInfo omMultipartKeyInfo,
OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
OMResponse omResponse) {
super(omResponse);
this.multipartKey = multipartKey;
this.openKey = openKey;
this.deleteTimeStamp = deleteTimeStamp;
this.deletePartKeyInfo = deletePartKeyInfo;
this.omMultipartKeyInfo = omMultipartKeyInfo;
this.oldMultipartKeyInfo = oldPartKeyInfo;
@@ -69,9 +67,16 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) {
// This means that by the time we tried to commit the part, someone had
// aborted this multipart upload. So, delete this part's information.
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(openKey);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(deletePartKeyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(deletePartKeyInfo);
}
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
OmUtils.getDeletedKeyName(openKey, deleteTimeStamp),
deletePartKeyInfo);
openKey,
repeatedOmKeyInfo);
}
if (getOMResponse().getStatus() == OK) {
@@ -85,10 +90,19 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
// This means for this multipart upload part upload, we have an old
// part information, so delete it.
if (oldMultipartKeyInfo != null) {
RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.
getDeletedTable().get(oldMultipartKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
} else {
repeatedOmKeyInfo.addOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
}
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
OmUtils.getDeletedKeyName(oldMultipartKeyInfo.getPartName(),
deleteTimeStamp),
OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
oldMultipartKeyInfo.getPartName(),
repeatedOmKeyInfo);
}


@@ -28,13 +28,13 @@ import java.util.UUID;
import com.google.common.base.Optional;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -370,10 +370,16 @@ public final class TestOMRequestUtils {
// Delete key from KeyTable and put in DeletedKeyTable
omMetadataManager.getKeyTable().delete(ozoneKey);
String deletedKeyName = OmUtils.getDeletedKeyName(ozoneKey, Time.now());
omMetadataManager.getDeletedTable().put(deletedKeyName, omKeyInfo);
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(ozoneKey);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
}
omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo);
return deletedKeyName;
return ozoneKey;
}
/**


@@ -21,9 +21,7 @@ package org.apache.hadoop.ozone.om.response.key;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -52,15 +50,11 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
.setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
.build();
long deletionTime = Time.now();
OMKeyDeleteResponse omKeyDeleteResponse =
new OMKeyDeleteResponse(omKeyInfo, deletionTime, omResponse);
new OMKeyDeleteResponse(omKeyInfo, omResponse);
String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);
String deletedOzoneKeyName = OmUtils.getDeletedKeyName(
ozoneKey, deletionTime);
TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
clientID, replicationType, replicationFactor, omMetadataManager);
@@ -76,7 +70,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
// As default key entry does not have any blocks, it should not be in
// deletedKeyTable.
Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(
deletedOzoneKeyName));
ozoneKey));
}
@Test
@@ -117,13 +111,9 @@
.setStatus(OzoneManagerProtocolProtos.Status.OK)
.setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
.build();
long deletionTime = Time.now();
OMKeyDeleteResponse omKeyDeleteResponse =
new OMKeyDeleteResponse(omKeyInfo, deletionTime, omResponse);
String deletedOzoneKeyName = OmUtils.getDeletedKeyName(
ozoneKey, deletionTime);
new OMKeyDeleteResponse(omKeyInfo, omResponse);
Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -135,7 +125,7 @@ public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
// Key has blocks, it should not be in deletedKeyTable.
Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
deletedOzoneKeyName));
ozoneKey));
}
@@ -152,7 +142,7 @@
.build();
OMKeyDeleteResponse omKeyDeleteResponse =
new OMKeyDeleteResponse(omKeyInfo, Time.now(), omResponse);
new OMKeyDeleteResponse(omKeyInfo, omResponse);
String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);


@@ -113,8 +113,7 @@ public class TestS3MultipartResponse {
.setAbortMultiPartUploadResponse(
MultipartUploadAbortResponse.newBuilder().build()).build();
return new S3MultipartUploadAbortResponse(multipartKey, timeStamp,
omMultipartKeyInfo,
return new S3MultipartUploadAbortResponse(multipartKey, omMultipartKeyInfo,
omResponse);
}


@@ -20,10 +20,10 @@ package org.apache.hadoop.ozone.om.response.s3.multipart;
import java.util.UUID;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.junit.Assert;
import org.junit.Test;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -124,24 +124,25 @@ public class TestS3MultipartUploadAbortResponse
Assert.assertTrue(omMetadataManager.countRowsInTable(
omMetadataManager.getDeletedTable()) == 2);
String part1DeletedKeyName = OmUtils.getDeletedKeyName(
omMultipartKeyInfo.getPartKeyInfo(1).getPartName(),
timeStamp);
String part1DeletedKeyName =
omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
String part2DeletedKeyName = OmUtils.getDeletedKeyName(
omMultipartKeyInfo.getPartKeyInfo(2).getPartName(),
timeStamp);
String part2DeletedKeyName =
omMultipartKeyInfo.getPartKeyInfo(2).getPartName();
Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
part1DeletedKeyName));
Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
part2DeletedKeyName));
RepeatedOmKeyInfo ro =
omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
omMetadataManager.getDeletedTable().get(part1DeletedKeyName));
ro.getOmKeyInfoList().get(0));
ro = omMetadataManager.getDeletedTable().get(part2DeletedKeyName);
Assert.assertEquals(OmKeyInfo.getFromProtobuf(part2.getPartKeyInfo()),
omMetadataManager.getDeletedTable().get(part2DeletedKeyName));
ro.getOmKeyInfoList().get(0));
}
}