HDDS-1054. List Multipart uploads in a bucket (#1277)

This commit is contained in:
Elek, Márton 2019-09-19 20:06:02 +02:00 committed by Bharat Viswanadham
parent a79f286c6f
commit da1c67e0c2
35 changed files with 1253 additions and 148 deletions

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.hdds.client;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
/**
* The replication factor to be used while writing key into ozone.
*/
@ -53,6 +55,22 @@ public enum ReplicationFactor {
throw new IllegalArgumentException("Unsupported value: " + value);
}
/**
 * Maps a protobuf replication factor onto the client-side enum.
 *
 * @param replicationFactor protobuf value, may be null
 * @return the matching ReplicationFactor, or null when the input is null
 * @throws IllegalArgumentException for protobuf values with no client
 *         counterpart
 */
public static ReplicationFactor fromProto(
    HddsProtos.ReplicationFactor replicationFactor) {
  if (replicationFactor == null) {
    return null;
  }
  if (replicationFactor == HddsProtos.ReplicationFactor.ONE) {
    return ReplicationFactor.ONE;
  }
  if (replicationFactor == HddsProtos.ReplicationFactor.THREE) {
    return ReplicationFactor.THREE;
  }
  throw new IllegalArgumentException(
      "Unsupported ProtoBuf replication factor: " + replicationFactor);
}
/**
* Returns integer representation of ReplicationFactor.
* @return replication value

View File

@ -18,11 +18,31 @@
package org.apache.hadoop.hdds.client;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
/**
* The replication type to be used while writing key into ozone.
*/
public enum ReplicationType {
RATIS,
STAND_ALONE,
CHAINED
CHAINED;
public static ReplicationType fromProto(
HddsProtos.ReplicationType replicationType) {
if (replicationType == null) {
return null;
}
switch (replicationType) {
case RATIS:
return ReplicationType.RATIS;
case STAND_ALONE:
return ReplicationType.STAND_ALONE;
case CHAINED:
return ReplicationType.CHAINED;
default:
throw new IllegalArgumentException(
"Unsupported ProtoBuf replication type: " + replicationType);
}
}
}

View File

@ -555,6 +555,16 @@ public class OzoneBucket extends WithMetadata {
.listStatus(volumeName, name, keyName, recursive, startKey, numEntries);
}
/**
 * Returns the list of the in-flight multipart uploads of this bucket.
 *
 * @param prefix Optional string to filter for the selected keys.
 */
public OzoneMultipartUploadList listMultipartUploads(String prefix)
    throws IOException {
  String bucket = getName();
  return proxy.listMultipartUploads(volumeName, bucket, prefix);
}
/**
* An Iterator to iterate over {@link OzoneKey} list.
*/

View File

@ -0,0 +1,89 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
import java.time.Instant;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
/**
 * Information about one initialized upload.
 * <p>
 * All fields except {@link #creationTime} are set once at construction and
 * never change, so they are declared {@code final} (only the creation time
 * has a setter in the original API, which is preserved).
 */
public class OzoneMultipartUpload {

  // Location of the key the upload belongs to.
  private final String volumeName;
  private final String bucketName;
  private final String keyName;

  // Server-generated identifier of the in-flight multipart upload.
  private final String uploadId;

  // Time the upload was initiated; mutable via setCreationTime().
  private Instant creationTime;

  // Replication settings requested for the key.
  private final ReplicationType replicationType;
  private final ReplicationFactor replicationFactor;

  /**
   * Creates a fully populated upload descriptor.
   *
   * @param volumeName        volume containing the bucket
   * @param bucketName        bucket containing the key
   * @param keyName           key being uploaded
   * @param uploadId          multipart upload id
   * @param creationTime      initiation time of the upload
   * @param replicationType   replication type of the key
   * @param replicationFactor replication factor of the key
   */
  public OzoneMultipartUpload(String volumeName, String bucketName,
      String keyName, String uploadId, Instant creationTime,
      ReplicationType replicationType,
      ReplicationFactor replicationFactor) {
    this.volumeName = volumeName;
    this.bucketName = bucketName;
    this.keyName = keyName;
    this.uploadId = uploadId;
    this.creationTime = creationTime;
    this.replicationType = replicationType;
    this.replicationFactor = replicationFactor;
  }

  public String getVolumeName() {
    return volumeName;
  }

  public String getBucketName() {
    return bucketName;
  }

  public String getKeyName() {
    return keyName;
  }

  public String getUploadId() {
    return uploadId;
  }

  public Instant getCreationTime() {
    return creationTime;
  }

  public void setCreationTime(Instant creationTime) {
    this.creationTime = creationTime;
  }

  public ReplicationType getReplicationType() {
    return replicationType;
  }

  public ReplicationFactor getReplicationFactor() {
    return replicationFactor;
  }
}

View File

@ -0,0 +1,47 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
import java.util.ArrayList;
import java.util.List;
import com.google.common.base.Preconditions;
/**
 * List of in-flight MPU (multipart) uploads.
 */
public class OzoneMultipartUploadList {

  // Backing list; never null — enforced by both constructor and setter.
  private List<OzoneMultipartUpload> uploads;

  /**
   * Creates the list wrapper.
   *
   * @param uploads the uploads to wrap, must not be null
   */
  public OzoneMultipartUploadList(
      List<OzoneMultipartUpload> uploads) {
    Preconditions.checkNotNull(uploads);
    this.uploads = uploads;
  }

  public List<OzoneMultipartUpload> getUploads() {
    return uploads;
  }

  /**
   * Replaces the wrapped uploads.
   *
   * @param uploads must not be null
   */
  public void setUploads(
      List<OzoneMultipartUpload> uploads) {
    // Keep the non-null invariant consistent with the constructor; the
    // original setter silently accepted null, breaking that invariant.
    Preconditions.checkNotNull(uploads);
    this.uploads = uploads;
  }
}

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import java.util.ArrayList;
@ -29,6 +30,9 @@ import java.util.List;
public class OzoneMultipartUploadPartListParts {
private ReplicationType replicationType;
private ReplicationFactor replicationFactor;
//When a list is truncated, this element specifies the last part in the list,
// as well as the value to use for the part-number-marker request parameter
// in a subsequent request.
@ -41,10 +45,12 @@ public class OzoneMultipartUploadPartListParts {
private List<PartInfo> partInfoList = new ArrayList<>();
/**
 * Creates a part-listing result for one multipart upload.
 *
 * @param type       replication type of the key under upload
 * @param factor     replication factor of the key under upload
 * @param nextMarker part number to use as the part-number-marker of a
 *                   subsequent listing request when the result is truncated
 * @param truncate   whether the part list was truncated by the max-parts
 *                   limit
 */
public OzoneMultipartUploadPartListParts(ReplicationType type,
    ReplicationFactor factor,
    int nextMarker, boolean truncate) {
  this.replicationType = type;
  this.nextPartNumberMarker = nextMarker;
  this.truncated = truncate;
  this.replicationFactor = factor;
}
public void addAllParts(List<PartInfo> partInfos) {
@ -71,6 +77,10 @@ public class OzoneMultipartUploadPartListParts {
return partInfoList;
}
public ReplicationFactor getReplicationFactor() {
return replicationFactor;
}
/**
* Class that represents each Part information of a multipart upload part.
*/

View File

@ -454,6 +454,11 @@ public interface ClientProtocol {
String bucketName, String keyName, String uploadID, int partNumberMarker,
int maxParts) throws IOException;
/**
 * Returns the in-flight multipart uploads of a bucket.
 *
 * @param volumeName volume which contains the bucket
 * @param bucketName bucket whose uploads are listed
 * @param prefix     optional key-name filter for the returned uploads
 * @return the matching in-flight multipart uploads
 * @throws IOException in case of any communication or server-side error
 */
OzoneMultipartUploadList listMultipartUploads(String volumeName,
    String bucketName, String prefix) throws IOException;
/**
* Get a valid Delegation Token.

View File

@ -59,6 +59,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
@ -901,12 +902,13 @@ public class RpcClient implements ClientProtocol {
.setAcls(getAclList())
.build();
OmMultipartUploadList omMultipartUploadList = new OmMultipartUploadList(
OmMultipartUploadCompleteList
omMultipartUploadCompleteList = new OmMultipartUploadCompleteList(
partsMap);
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
ozoneManagerClient.completeMultipartUpload(keyArgs,
omMultipartUploadList);
omMultipartUploadCompleteList);
return omMultipartUploadCompleteInfo;
@ -942,8 +944,10 @@ public class RpcClient implements ClientProtocol {
uploadID, partNumberMarker, maxParts);
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
new OzoneMultipartUploadPartListParts(ReplicationType.valueOf(
omMultipartUploadListParts.getReplicationType().toString()),
new OzoneMultipartUploadPartListParts(ReplicationType
.fromProto(omMultipartUploadListParts.getReplicationType()),
ReplicationFactor
.fromProto(omMultipartUploadListParts.getReplicationFactor()),
omMultipartUploadListParts.getNextPartNumberMarker(),
omMultipartUploadListParts.isTruncated());
@ -957,6 +961,26 @@ public class RpcClient implements ClientProtocol {
}
@Override
public OzoneMultipartUploadList listMultipartUploads(String volumeName,
    String bucketName, String prefix) throws IOException {
  // Fetch the OM-side view of the in-flight uploads over RPC.
  OmMultipartUploadList omMultipartUploadList =
      ozoneManagerClient.listMultipartUploads(volumeName, bucketName, prefix);
  // Convert each OM helper object to the client-facing type, translating
  // the protobuf replication enums via fromProto().
  List<OzoneMultipartUpload> uploads = omMultipartUploadList.getUploads()
      .stream()
      .map(upload -> new OzoneMultipartUpload(upload.getVolumeName(),
          upload.getBucketName(),
          upload.getKeyName(),
          upload.getUploadId(),
          upload.getCreationTime(),
          ReplicationType.fromProto(upload.getReplicationType()),
          ReplicationFactor.fromProto(upload.getReplicationFactor())))
      .collect(Collectors.toList());
  OzoneMultipartUploadList result = new OzoneMultipartUploadList(uploads);
  return result;
}
@Override
public OzoneFileStatus getOzoneFileStatus(String volumeName,
String bucketName, String keyName) throws IOException {

View File

@ -224,6 +224,7 @@ public final class OmUtils {
case ListStatus:
case GetAcl:
case DBUpdates:
case ListMultipartUploads:
return true;
case CreateVolume:
case SetVolumeProperty:

View File

@ -56,6 +56,7 @@ public enum OMAction implements AuditAction {
COMMIT_MULTIPART_UPLOAD_PARTKEY,
COMPLETE_MULTIPART_UPLOAD,
LIST_MULTIPART_UPLOAD_PARTS,
LIST_MULTIPART_UPLOADS,
ABORT_MULTIPART_UPLOAD,
//ACL Actions

View File

@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.common.BlockGroup;
@ -327,4 +328,11 @@ public interface OMMetadataManager {
*/
<KEY, VALUE> long countEstimatedRowsInTable(Table<KEY, VALUE> table)
throws IOException;
/**
 * Return the existing upload keys which includes volumeName, bucketName,
 * keyName.
 *
 * @param volumeName volume of the bucket to scan
 * @param bucketName bucket whose in-flight uploads are requested
 * @param prefix     key-name prefix used to filter the returned upload keys
 * @return DB keys of the matching in-flight multipart uploads
 * @throws IOException on metadata-store access errors
 */
List<String> getMultipartUploadKeys(String volumeName,
    String bucketName, String prefix) throws IOException;
}

View File

@ -16,11 +16,14 @@
*/
package org.apache.hadoop.ozone.om.helpers;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.MultipartKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.PartKeyInfo;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
@ -36,8 +39,6 @@ public class OmMultipartKeyInfo {
/**
* Construct OmMultipartKeyInfo object which holds multipart upload
* information for a key.
* @param id
* @param list upload parts of a key.
*/
public OmMultipartKeyInfo(String id, Map<Integer, PartKeyInfo> list) {
this.uploadID = id;

View File

@ -0,0 +1,149 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.om.helpers;
import java.time.Instant;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
/**
 * Information about one initialized upload.
 * <p>
 * Also encapsulates the DB-key format of a multipart upload:
 * {@code OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX
 * + key + OM_KEY_PREFIX + uploadId} (see {@link #getDbKey}), and the
 * reverse parsing in {@link #from(String)}.
 */
public class OmMultipartUpload {

  // Location of the key the upload belongs to.
  private String volumeName;

  private String bucketName;

  private String keyName;

  // Server-generated identifier of the in-flight upload.
  private String uploadId;

  // May be unset when the instance was parsed from a DB key (see from()).
  private Instant creationTime;

  // Replication settings; unset for the shorter constructors, filled in
  // later via the setters.
  private HddsProtos.ReplicationType replicationType;

  private HddsProtos.ReplicationFactor replicationFactor;

  /**
   * Creates a descriptor with only the location and upload id set.
   */
  public OmMultipartUpload(String volumeName, String bucketName,
      String keyName, String uploadId) {
    this.volumeName = volumeName;
    this.bucketName = bucketName;
    this.keyName = keyName;
    this.uploadId = uploadId;
  }

  /**
   * Creates a descriptor with location, upload id and creation time set.
   */
  public OmMultipartUpload(String volumeName, String bucketName,
      String keyName, String uploadId, Instant creationDate) {
    this.volumeName = volumeName;
    this.bucketName = bucketName;
    this.keyName = keyName;
    this.uploadId = uploadId;
    this.creationTime = creationDate;
  }

  /**
   * Creates a fully populated descriptor including replication settings.
   */
  public OmMultipartUpload(String volumeName, String bucketName,
      String keyName, String uploadId, Instant creationTime,
      ReplicationType replicationType,
      ReplicationFactor replicationFactor) {
    this.volumeName = volumeName;
    this.bucketName = bucketName;
    this.keyName = keyName;
    this.uploadId = uploadId;
    this.creationTime = creationTime;
    this.replicationType = replicationType;
    this.replicationFactor = replicationFactor;
  }

  /**
   * Parses a DB key produced by {@link #getDbKey} back into its parts.
   * <p>
   * The key is split on OM_KEY_PREFIX; segment 0 is empty (leading
   * separator), segment 1 is the volume, segment 2 the bucket, the last
   * segment is the uploadId, and everything in between is the key name
   * (which itself may contain OM_KEY_PREFIX characters, e.g. "dir1/key1").
   * NOTE(review): the "+ 3" offset and the split indices assume
   * OM_KEY_PREFIX is a single character ("/") — confirm against
   * OzoneConsts before changing the separator.
   *
   * @throws IllegalArgumentException when the key has fewer than 5 segments
   */
  public static OmMultipartUpload from(String key) {
    String[] split = key.split(OM_KEY_PREFIX);
    if (split.length < 5) {
      throw new IllegalArgumentException("Key " + key
          + " doesn't have enough segments to be a valid multipart upload key");
    }
    String uploadId = split[split.length - 1];
    String volume = split[1];
    String bucket = split[2];
    // Key name = everything between "/volume/bucket/" and "/uploadId".
    return new OmMultipartUpload(volume, bucket,
        key.substring(volume.length() + bucket.length() + 3,
            key.length() - uploadId.length() - 1), uploadId);
  }

  /** Returns the DB key of this upload, built from its own fields. */
  public String getDbKey() {
    return OmMultipartUpload
        .getDbKey(volumeName, bucketName, keyName, uploadId);
  }

  /** Builds the multipart-upload DB key: /volume/bucket/key/uploadId. */
  public static String getDbKey(String volume, String bucket, String key,
      String uploadId) {
    return getDbKey(volume, bucket, key) + OM_KEY_PREFIX + uploadId;
  }

  /** Builds the key-location prefix of the DB key: /volume/bucket/key. */
  public static String getDbKey(String volume, String bucket, String key) {
    return OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket +
        OM_KEY_PREFIX + key;
  }

  public String getVolumeName() {
    return volumeName;
  }

  public String getBucketName() {
    return bucketName;
  }

  public String getKeyName() {
    return keyName;
  }

  public String getUploadId() {
    return uploadId;
  }

  public Instant getCreationTime() {
    return creationTime;
  }

  public void setCreationTime(Instant creationTime) {
    this.creationTime = creationTime;
  }

  public ReplicationType getReplicationType() {
    return replicationType;
  }

  public void setReplicationType(
      ReplicationType replicationType) {
    this.replicationType = replicationType;
  }

  public ReplicationFactor getReplicationFactor() {
    return replicationFactor;
  }

  public void setReplicationFactor(
      ReplicationFactor replicationFactor) {
    this.replicationFactor = replicationFactor;
  }
}

View File

@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.om.helpers;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
/**
 * This class represents multipart list, which is required for
 * CompleteMultipart upload request.
 */
public class OmMultipartUploadCompleteList {

  private final TreeMap<Integer, String> multipartMap;

  /**
   * Construct OmMultipartUploadCompleteList which holds multipart map which
   * contains part number and part name.
   *
   * @param partMap map of part number to part name
   */
  public OmMultipartUploadCompleteList(Map<Integer, String> partMap) {
    this.multipartMap = new TreeMap<>(partMap);
  }

  /**
   * Return multipartMap which is a map of part number and part name.
   *
   * @return multipartMap
   */
  public TreeMap<Integer, String> getMultipartMap() {
    return multipartMap;
  }

  /**
   * Construct Part list from the multipartMap, in ascending part-number
   * order.
   *
   * @return List&lt;Part&gt;
   */
  public List<Part> getPartsList() {
    List<Part> parts = new ArrayList<>();
    for (Map.Entry<Integer, String> entry : multipartMap.entrySet()) {
      parts.add(Part.newBuilder()
          .setPartName(entry.getValue())
          .setPartNumber(entry.getKey())
          .build());
    }
    return parts;
  }
}

View File

@ -18,46 +18,30 @@
package org.apache.hadoop.ozone.om.helpers;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
/**
* This class represents multipart list, which is required for
* CompleteMultipart upload request.
* List of in-flight MPU uploads.
*/
public class OmMultipartUploadList {
private final TreeMap<Integer, String> multipartMap;
private List<OmMultipartUpload> uploads;
/**
* Construct OmMultipartUploadList which holds multipart map which contains
* part number and part name.
* @param partMap
*/
public OmMultipartUploadList(Map<Integer, String> partMap) {
this.multipartMap = new TreeMap<>(partMap);
public OmMultipartUploadList(
List<OmMultipartUpload> uploads) {
this.uploads = uploads;
}
/**
* Return multipartMap which is a map of part number and part name.
* @return multipartMap
*/
public TreeMap<Integer, String> getMultipartMap() {
return multipartMap;
public List<OmMultipartUpload> getUploads() {
return uploads;
}
/**
* Construct Part list from the multipartMap.
* @return List<Part>
*/
public List<Part> getPartsList() {
List<Part> partList = new ArrayList<>();
multipartMap.forEach((partNumber, partName) -> partList.add(Part
.newBuilder().setPartName(partName).setPartNumber(partNumber).build()));
return partList;
public void setUploads(
List<OmMultipartUpload> uploads) {
this.uploads = uploads;
}
}

View File

@ -19,6 +19,8 @@
package org.apache.hadoop.ozone.om.helpers;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.PartInfo;
@ -29,7 +31,11 @@ import java.util.List;
* Class which is response for the list parts of a multipart upload key.
*/
public class OmMultipartUploadListParts {
private HddsProtos.ReplicationType replicationType;
private HddsProtos.ReplicationFactor replicationFactor;
//When a list is truncated, this element specifies the last part in the list,
// as well as the value to use for the part-number-marker request parameter
// in a subsequent request.
@ -39,11 +45,15 @@ public class OmMultipartUploadListParts {
// A list can be truncated if the number of parts exceeds the limit
// returned in the MaxParts element.
private boolean truncated;
private final List<OmPartInfo> partInfoList = new ArrayList<>();
public OmMultipartUploadListParts(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor factor,
int nextMarker, boolean truncate) {
this.replicationType = type;
this.replicationFactor = factor;
this.nextPartNumberMarker = nextMarker;
this.truncated = truncate;
}
@ -72,6 +82,10 @@ public class OmMultipartUploadListParts {
return partInfoList;
}
public ReplicationFactor getReplicationFactor() {
return replicationFactor;
}
public void addPartList(List<OmPartInfo> partInfos) {
this.partInfoList.addAll(partInfos);
}

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
@ -367,7 +368,7 @@ public interface OzoneManagerProtocol
* @throws IOException
*/
OmMultipartUploadCompleteInfo completeMultipartUpload(
OmKeyArgs omKeyArgs, OmMultipartUploadList multipartUploadList)
OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
throws IOException;
/**
@ -391,6 +392,11 @@ public interface OzoneManagerProtocol
String keyName, String uploadID, int partNumberMarker,
int maxParts) throws IOException;
/**
 * List in-flight uploads.
 *
 * @param volumeName volume which contains the bucket
 * @param bucketName bucket whose uploads are listed
 * @param prefix     optional key-name filter for the returned uploads
 * @return the matching in-flight multipart uploads
 * @throws IOException in case of any communication or server-side error
 */
OmMultipartUploadList listMultipartUploads(String volumeName,
    String bucketName, String prefix) throws IOException;
/**
* Gets s3Secret for given kerberos user.
* @param kerberosID

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.protocolPB;
import java.io.EOFException;
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
@ -47,7 +48,9 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
@ -59,6 +62,8 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileResponse;
@ -1072,7 +1077,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
@Override
public OmMultipartUploadCompleteInfo completeMultipartUpload(
OmKeyArgs omKeyArgs, OmMultipartUploadList multipartUploadList)
OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
throws IOException {
MultipartUploadCompleteRequest.Builder multipartUploadCompleteRequest =
MultipartUploadCompleteRequest.newBuilder();
@ -1145,7 +1150,7 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
OmMultipartUploadListParts omMultipartUploadListParts =
new OmMultipartUploadListParts(response.getType(),
new OmMultipartUploadListParts(response.getType(), response.getFactor(),
response.getNextPartNumberMarker(), response.getIsTruncated());
omMultipartUploadListParts.addProtoPartList(response.getPartsListList());
@ -1153,6 +1158,43 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
}
@Override
public OmMultipartUploadList listMultipartUploads(String volumeName,
    String bucketName,
    String prefix) throws IOException {
  // The prefix field is required in the protobuf message, so a null
  // prefix is sent as the empty string (meaning: no filtering).
  ListMultipartUploadsRequest request = ListMultipartUploadsRequest
      .newBuilder()
      .setVolume(volumeName)
      .setBucket(bucketName)
      .setPrefix(prefix == null ? "" : prefix)
      .build();

  OMRequest omRequest = createOMRequest(Type.ListMultipartUploads)
      .setListMultipartUploadsRequest(request)
      .build();

  ListMultipartUploadsResponse listMultipartUploadsResponse =
      handleError(submitRequest(omRequest)).getListMultipartUploadsResponse();

  // Convert each protobuf MultipartUploadInfo to the OM helper object;
  // the wire format carries the creation time as epoch milliseconds.
  List<OmMultipartUpload> uploadList =
      listMultipartUploadsResponse.getUploadsListList()
          .stream()
          .map(proto -> new OmMultipartUpload(
              proto.getVolumeName(),
              proto.getBucketName(),
              proto.getKeyName(),
              proto.getUploadId(),
              Instant.ofEpochMilli(proto.getCreationTime()),
              proto.getType(),
              proto.getFactor()
          ))
          .collect(Collectors.toList());

  OmMultipartUploadList response = new OmMultipartUploadList(uploadList);

  return response;
}
public List<ServiceInfo> getServiceList() throws IOException {
ServiceListRequest req = ServiceListRequest.newBuilder().build();

View File

@ -90,6 +90,8 @@ enum Type {
GetAcl = 78;
PurgeKeys = 81;
ListMultipartUploads = 82;
}
message OMRequest {
@ -158,6 +160,7 @@ message OMRequest {
optional PurgeKeysRequest purgeKeysRequest = 81;
optional UpdateGetS3SecretRequest updateGetS3SecretRequest = 82;
optional ListMultipartUploadsRequest listMultipartUploadsRequest = 83;
}
message OMResponse {
@ -225,6 +228,8 @@ message OMResponse {
optional GetAclResponse getAclResponse = 78;
optional PurgeKeysResponse purgeKeysResponse = 81;
optional ListMultipartUploadsResponse listMultipartUploadsResponse = 82;
}
enum Status {
@ -1014,9 +1019,32 @@ message MultipartUploadListPartsRequest {
message MultipartUploadListPartsResponse {
optional hadoop.hdds.ReplicationType type = 2;
optional uint32 nextPartNumberMarker = 3;
optional bool isTruncated = 4;
repeated PartInfo partsList = 5;
optional hadoop.hdds.ReplicationFactor factor = 3;
optional uint32 nextPartNumberMarker = 4;
optional bool isTruncated = 5;
repeated PartInfo partsList = 6;
}
// Request to list the in-flight multipart uploads of one bucket.
// An empty prefix means "no key-name filtering".
message ListMultipartUploadsRequest {
    required string volume = 1;
    required string bucket = 2;
    required string prefix = 3;
}

// Response carrying the matching in-flight uploads.
message ListMultipartUploadsResponse {
    // NOTE(review): truncation is declared but its population is not
    // visible in this change — confirm server-side behavior.
    optional bool isTruncated = 1;
    repeated MultipartUploadInfo uploadsList = 2;
}

// Wire representation of one in-flight multipart upload.
message MultipartUploadInfo {
    required string volumeName = 1;
    required string bucketName = 2;
    required string keyName = 3;
    required string uploadId = 4;
    // Initiation time of the upload, epoch milliseconds.
    required uint64 creationTime = 5;
    required hadoop.hdds.ReplicationType type = 6;
    required hadoop.hdds.ReplicationFactor factor = 7;
}
@ -1027,12 +1055,12 @@ message PartInfo {
required uint64 size = 4;
}
message GetDelegationTokenResponseProto{
message GetDelegationTokenResponseProto {
optional hadoop.common.GetDelegationTokenResponseProto response = 2;
}
message RenewDelegationTokenResponseProto{
message RenewDelegationTokenResponseProto {
optional hadoop.common.RenewDelegationTokenResponseProto response = 2;
}

View File

@ -0,0 +1,39 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.om.helpers;
import org.junit.Assert;
import org.junit.Test;
/**
* Test utilities inside OmMutipartUpload.
*/
public class TestOmMultipartUpload {
@Test
public void from() {
String key1 =
OmMultipartUpload.getDbKey("vol1", "bucket1", "dir1/key1", "uploadId");
OmMultipartUpload info = OmMultipartUpload.from(key1);
Assert.assertEquals("vol1", info.getVolumeName());
Assert.assertEquals("bucket1", info.getBucketName());
Assert.assertEquals("dir1/key1", info.getKeyName());
Assert.assertEquals("uploadId", info.getUploadId());
}
}

View File

@ -252,3 +252,23 @@ Test Multipart Upload Put With Copy and range
Execute AWSS3APICli get-object --bucket ${BUCKET} --key copyrange/destination /tmp/part-result
Compare files /tmp/part1 /tmp/part-result
Test Multipart Upload list
${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key listtest/key1
${uploadID1} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} listtest/key1
Should contain ${result} UploadId
${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key listtest/key2
${uploadID2} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} listtest/key2
Should contain ${result} UploadId
${result} = Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix listtest
Should contain ${result} ${uploadID1}
Should contain ${result} ${uploadID2}
${count} = Execute and checkrc echo '${result}' | jq -r '.Uploads | length' 0
Should Be Equal ${count} 2

View File

@ -19,12 +19,14 @@ package org.apache.hadoop.ozone.om;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
@ -220,7 +222,7 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl {
* @throws IOException
*/
OmMultipartUploadCompleteInfo completeMultipartUpload(OmKeyArgs omKeyArgs,
OmMultipartUploadList multipartUploadList) throws IOException;
OmMultipartUploadCompleteList multipartUploadList) throws IOException;
/**
* Abort multipart upload request.
@ -229,6 +231,8 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl {
*/
void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException;
/**
 * Returns the in-flight multipart uploads of the given bucket.
 *
 * @param volumeName volume which contains the bucket
 * @param bucketName bucket whose uploads are listed
 * @param prefix     optional key-name filter for the returned uploads
 * @return the matching in-flight multipart uploads
 * @throws OMException on Ozone Manager errors
 */
OmMultipartUploadList listMultipartUploads(String volumeName,
    String bucketName, String prefix) throws OMException;
/**
* Returns list of parts of a multipart upload key.

View File

@ -19,6 +19,9 @@ package org.apache.hadoop.ozone.om;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
@ -30,12 +33,8 @@ import java.util.Objects;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
@ -44,6 +43,7 @@ import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
@ -51,10 +51,20 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.UniqueId;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.CodecRegistry;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@ -64,7 +74,9 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
@ -73,28 +85,18 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.RequestContext;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.PartKeyInfo;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.UniqueId;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.CodecRegistry;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.Table;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import org.apache.commons.codec.digest.DigestUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
@ -120,7 +122,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLU
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
import static org.apache.hadoop.util.Time.monotonicNow;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -1058,7 +1059,7 @@ public class KeyManagerImpl implements KeyManager {
@Override
@SuppressWarnings("methodlength")
public OmMultipartUploadCompleteInfo completeMultipartUpload(
OmKeyArgs omKeyArgs, OmMultipartUploadList multipartUploadList)
OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
throws IOException {
Preconditions.checkNotNull(omKeyArgs);
Preconditions.checkNotNull(multipartUploadList);
@ -1277,6 +1278,59 @@ public class KeyManagerImpl implements KeyManager {
}
/**
 * List the in-flight multipart uploads of a bucket, optionally restricted
 * to keys starting with {@code prefix}.
 * <p>
 * For each multipart-info record the matching open-key entry is consulted
 * (best effort) to fill in creation time and replication settings; if that
 * lookup fails or the entry is missing, the upload is still returned with
 * those fields unset.
 *
 * @param volumeName volume of the bucket, must not be null
 * @param bucketName bucket to list, must not be null
 * @param prefix     key-name prefix filter; empty string lists everything
 * @throws OMException wrapping any metadata-store read failure
 */
@Override
public OmMultipartUploadList listMultipartUploads(String volumeName,
    String bucketName, String prefix) throws OMException {
  Preconditions.checkNotNull(volumeName);
  Preconditions.checkNotNull(bucketName);

  metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName);
  try {

    List<String> multipartUploadKeys =
        metadataManager
            .getMultipartUploadKeys(volumeName, bucketName, prefix);

    List<OmMultipartUpload> collect = multipartUploadKeys.stream()
        .map(OmMultipartUpload::from)
        .map(upload -> {
          try {
            Table<String, OmKeyInfo> openKeyTable =
                metadataManager.getOpenKeyTable();

            OmKeyInfo omKeyInfo =
                openKeyTable.get(upload.getDbKey());
            if (omKeyInfo != null) {
              upload.setCreationTime(
                  Instant.ofEpochMilli(omKeyInfo.getCreationTime()));
              upload.setReplicationType(omKeyInfo.getType());
              upload.setReplicationFactor(omKeyInfo.getFactor());
            } else {
              // Missing open-key entry: return the upload without the
              // optional metadata instead of failing the whole listing.
              LOG.warn("Open key entry for multipart upload record is "
                  + "missing: {}", upload.getDbKey());
            }
          } catch (IOException e) {
            // Log the key that was actually looked up.
            LOG.warn("Open key entry for multipart upload record cannot "
                + "be read: {}", upload.getDbKey(), e);
          }
          return upload;
        })
        .collect(Collectors.toList());

    return new OmMultipartUploadList(collect);

  } catch (IOException ex) {
    LOG.error("List Multipart Uploads Failed: volume: {}, bucket: {}, "
        + "prefix: {}", volumeName, bucketName, prefix, ex);
    // NOTE(review): reuses the list-parts result code; there is no
    // dedicated LIST_MULTIPART_UPLOADS_FAILED code visible here.
    throw new OMException(ex.getMessage(), ResultCodes
        .LIST_MULTIPART_UPLOAD_PARTS_FAILED);
  } finally {
    metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
        bucketName);
  }
}
@Override
public OmMultipartUploadListParts listParts(String volumeName,
@ -1307,6 +1361,7 @@ public class KeyManagerImpl implements KeyManager {
partKeyInfoMap.entrySet().iterator();
HddsProtos.ReplicationType replicationType = null;
HddsProtos.ReplicationFactor replicationFactor = null;
int count = 0;
List<OmPartInfo> omPartInfoList = new ArrayList<>();
@ -1327,6 +1382,7 @@ public class KeyManagerImpl implements KeyManager {
//if there are parts, use replication type from one of the parts
replicationType = partKeyInfo.getPartKeyInfo().getType();
replicationFactor = partKeyInfo.getPartKeyInfo().getFactor();
count++;
}
}
@ -1343,10 +1399,12 @@ public class KeyManagerImpl implements KeyManager {
}
replicationType = omKeyInfo.getType();
replicationFactor = omKeyInfo.getFactor();
}
Preconditions.checkNotNull(replicationType,
"Replication type can't be identified");
Preconditions.checkNotNull(replicationFactor,
"Replication factor can't be identified");
if (partKeyInfoMapIterator.hasNext()) {
Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry =
@ -1357,7 +1415,7 @@ public class KeyManagerImpl implements KeyManager {
nextPartNumberMarker = 0;
}
OmMultipartUploadListParts omMultipartUploadListParts =
new OmMultipartUploadListParts(replicationType,
new OmMultipartUploadListParts(replicationType, replicationFactor,
nextPartNumberMarker, isTruncated);
omMultipartUploadListParts.addPartList(omPartInfoList);
return omMultipartUploadListParts;
@ -1365,8 +1423,10 @@ public class KeyManagerImpl implements KeyManager {
} catch (OMException ex) {
throw ex;
} catch (IOException ex){
LOG.error("List Multipart Upload Parts Failed: volume: " + volumeName +
"bucket: " + bucketName + "key: " + keyName, ex);
LOG.error(
"List Multipart Upload Parts Failed: volume: {}, bucket: {}, ,key: "
+ "{} ",
volumeName, bucketName, keyName, ex);
throw new OMException(ex.getMessage(), ResultCodes
.LIST_MULTIPART_UPLOAD_PARTS_FAILED);
} finally {

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om;
import com.google.common.annotations.VisibleForTesting;
import com.sun.codemodel.internal.JExpression;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsSystem;
@ -131,6 +132,8 @@ public class OMMetrics {
private @Metric MutableCounterLong numBucketS3Deletes;
private @Metric MutableCounterLong numBucketS3DeleteFails;
private @Metric MutableCounterLong numListMultipartUploadFails;
private @Metric MutableCounterLong numListMultipartUploads;
public OMMetrics() {
}
@ -324,10 +327,18 @@ public class OMMetrics {
numAbortMultipartUploads.incr();
}
// Counts failed ListMultipartUploads requests only; the total-request
// counter is bumped separately in incNumListMultipartUploads().
public void incNumListMultipartUploadFails() {
numListMultipartUploadFails.incr();
}
// Counts a ListMultipartUploads request. Also bumps the aggregate key-op
// counter, matching the pattern of incNumListMultipartUploadParts() below.
public void incNumListMultipartUploads() {
numKeyOps.incr();
numListMultipartUploads.incr();
}
public void incNumAbortMultipartUploadFails() {
numAbortMultipartUploadFails.incr();
}
public void incNumListMultipartUploadParts() {
numKeyOps.incr();
numListMultipartUploadParts.incr();

View File

@ -27,14 +27,23 @@ import java.util.stream.Collectors;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
import org.apache.hadoop.ozone.om.codec.VolumeListCodec;
@ -44,6 +53,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
@ -51,11 +61,6 @@ import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
@ -65,11 +70,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRE
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl;
import org.eclipse.jetty.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -108,7 +108,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
* |-------------------------------------------------------------------|
* | dTokenTable | s3g_access_key_id -> s3Secret |
* |-------------------------------------------------------------------|
* | prefixInfoTable | prefix -> PrefixInfo |
* | prefixInfoTable | prefix -> PrefixInfo |
* |-------------------------------------------------------------------|
* | multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...|
* |-------------------------------------------------------------------|
*/
@ -414,9 +416,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
public String getMultipartKey(String volume, String bucket, String key,
String
uploadId) {
String multipartKey = OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket +
OM_KEY_PREFIX + key + OM_KEY_PREFIX + uploadId;
return multipartKey;
return OmMultipartUpload.getDbKey(volume, bucket, key, uploadId);
}
/**
@ -823,6 +823,29 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
return count;
}
/**
 * Returns the DB keys of every in-flight multipart upload in the given
 * bucket whose key name starts with {@code prefix}.
 * <p>
 * Records are stored under {@code /volume/bucket/key/uploadId}, so a
 * seek to the prefix followed by a scan-until-mismatch over the sorted
 * table yields exactly the matching entries.
 *
 * @param volumeName volume of the bucket
 * @param bucketName bucket to scan
 * @param prefix     key-name prefix filter; empty string matches all
 * @throws IOException on metadata-store read failure
 */
@Override
public List<String> getMultipartUploadKeys(
    String volumeName, String bucketName, String prefix) throws IOException {
  List<String> response = new ArrayList<>();

  String prefixKey =
      OmMultipartUpload.getDbKey(volumeName, bucketName, prefix);

  // TableIterator holds native (RocksDB) resources; close it
  // deterministically instead of leaking it.
  try (TableIterator<String, ? extends KeyValue<String, OmMultipartKeyInfo>>
      iterator = getMultipartInfoTable().iterator()) {
    iterator.seek(prefixKey);

    while (iterator.hasNext()) {
      KeyValue<String, OmMultipartKeyInfo> entry = iterator.next();
      // Sorted table: the first key outside the prefix ends the range.
      if (entry.getKey().startsWith(prefixKey)) {
        response.add(entry.getKey());
      } else {
        break;
      }
    }
  }
  return response;
}
@Override
public Table<String, S3SecretValue> getS3SecretTable() {
return s3SecretTable;

View File

@ -77,6 +77,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
import org.apache.hadoop.ozone.OzoneSecurityUtil;
import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol;
import org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo;
@ -111,7 +112,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
@ -2771,7 +2772,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
@Override
public OmMultipartUploadCompleteInfo completeMultipartUpload(
OmKeyArgs omKeyArgs, OmMultipartUploadList multipartUploadList)
OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList)
throws IOException {
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
metrics.incNumCompleteMultipartUploads();
@ -2841,6 +2842,33 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
}
}
/**
 * List the in-flight multipart uploads of a bucket, delegating to the
 * key manager and recording metrics and an audit entry.
 *
 * @param volumeName volume of the bucket
 * @param bucketName bucket to list
 * @param prefix     key-name prefix filter
 * @throws IOException propagated from the key manager
 */
@Override
public OmMultipartUploadList listMultipartUploads(String volumeName,
    String bucketName, String prefix) throws IOException {

  Map<String, String> auditMap = new HashMap<>();
  auditMap.put(OzoneConsts.VOLUME, volumeName);
  auditMap.put(OzoneConsts.BUCKET, bucketName);
  auditMap.put(OzoneConsts.PREFIX, prefix);

  metrics.incNumListMultipartUploads();
  try {
    OmMultipartUploadList omMultipartUploadList =
        keyManager.listMultipartUploads(volumeName, bucketName, prefix);
    // Listing is a read-only operation: audit on the read log for
    // consistency with the other list operations (was logWriteSuccess).
    AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction
        .LIST_MULTIPART_UPLOADS, auditMap));
    return omMultipartUploadList;

  } catch (IOException ex) {
    metrics.incNumListMultipartUploadFails();
    AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction
        .LIST_MULTIPART_UPLOADS, auditMap, ex));
    throw ex;
  }
}
@Override
public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException {
if (isAclEnabled) {

View File

@ -25,11 +25,6 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import com.google.common.base.Optional;
import org.apache.commons.codec.digest.DigestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.audit.OMAction;
@ -40,31 +35,29 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.MultipartUploadCompleteRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.MultipartUploadCompleteResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.PartKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import com.google.common.base.Optional;
import org.apache.commons.codec.digest.DigestUtils;
import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Handle Multipart upload complete request.
@ -121,7 +114,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
.setSuccess(true);
OMClientResponse omClientResponse = null;
IOException exception = null;
OmMultipartUploadList multipartUploadList = null;
OmMultipartUploadCompleteList multipartUploadList = null;
try {
// TODO to support S3 ACL later.
TreeMap<Integer, String> partsMap = new TreeMap<>();
@ -129,7 +122,7 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest {
partsMap.put(part.getPartNumber(), part.getPartName());
}
multipartUploadList = new OmMultipartUploadList(partsMap);
multipartUploadList = new OmMultipartUploadCompleteList(partsMap);
acquiredLock = omMetadataManager.getLock().acquireLock(BUCKET_LOCK,
volumeName, bucketName);

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
@ -297,6 +298,11 @@ public class OzoneManagerRequestHandler implements RequestHandler {
listParts(request.getListMultipartUploadPartsRequest());
responseBuilder.setListMultipartUploadPartsResponse(listPartsResponse);
break;
case ListMultipartUploads:
ListMultipartUploadsResponse response =
listMultipartUploads(request.getListMultipartUploadsRequest());
responseBuilder.setListMultipartUploadsResponse(response);
break;
case ServiceList:
ServiceListResponse serviceListResponse = getServiceList(
request.getServiceListRequest());
@ -816,9 +822,9 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setBucketName(keyArgs.getBucketName())
.setKeyName(keyArgs.getKeyName())
.setType(keyArgs.getType())
.setFactor(keyArgs.getFactor())
.setAcls(keyArgs.getAclsList().stream().map(a ->
OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()))
.setFactor(keyArgs.getFactor())
.build();
OmMultipartInfo multipartInfo = impl.initiateMultipartUpload(omKeyArgs);
resp.setVolumeName(multipartInfo.getVolumeName());
@ -867,8 +873,8 @@ public class OzoneManagerRequestHandler implements RequestHandler {
partsMap.put(part.getPartNumber(), part.getPartName());
}
OmMultipartUploadList omMultipartUploadList =
new OmMultipartUploadList(partsMap);
OmMultipartUploadCompleteList omMultipartUploadCompleteList =
new OmMultipartUploadCompleteList(partsMap);
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
.setVolumeName(keyArgs.getVolumeName())
@ -879,7 +885,7 @@ public class OzoneManagerRequestHandler implements RequestHandler {
.setMultipartUploadID(keyArgs.getMultipartUploadID())
.build();
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = impl
.completeMultipartUpload(omKeyArgs, omMultipartUploadList);
.completeMultipartUpload(omKeyArgs, omMultipartUploadCompleteList);
response.setVolume(omMultipartUploadCompleteInfo.getVolume())
.setBucket(omMultipartUploadCompleteInfo.getBucket())
@ -931,6 +937,7 @@ public class OzoneManagerRequestHandler implements RequestHandler {
omPartInfoList.forEach(partInfo -> partInfoList.add(partInfo.getProto()));
response.setType(omMultipartUploadListParts.getReplicationType());
response.setFactor(omMultipartUploadListParts.getReplicationFactor());
response.setNextPartNumberMarker(
omMultipartUploadListParts.getNextPartNumberMarker());
response.setIsTruncated(omMultipartUploadListParts.isTruncated());
@ -940,6 +947,36 @@ public class OzoneManagerRequestHandler implements RequestHandler {
}
/**
 * Handles a ListMultipartUploads RPC: queries the OM for the bucket's
 * in-flight uploads and converts each one into its protobuf form.
 *
 * @param request volume/bucket/prefix of the listing
 * @throws IOException propagated from the OM lookup
 */
private ListMultipartUploadsResponse listMultipartUploads(
    ListMultipartUploadsRequest request)
    throws IOException {

  OmMultipartUploadList uploads =
      impl.listMultipartUploads(request.getVolume(), request.getBucket(),
          request.getPrefix());

  // Convert every OM upload record into the wire representation.
  List<MultipartUploadInfo> uploadInfos = uploads.getUploads()
      .stream()
      .map(upload -> {
        MultipartUploadInfo.Builder info = MultipartUploadInfo.newBuilder()
            .setVolumeName(upload.getVolumeName())
            .setBucketName(upload.getBucketName())
            .setKeyName(upload.getKeyName())
            .setUploadId(upload.getUploadId())
            .setType(upload.getReplicationType())
            .setFactor(upload.getReplicationFactor())
            .setCreationTime(upload.getCreationTime().toEpochMilli());
        return info.build();
      })
      .collect(Collectors.toList());

  return ListMultipartUploadsResponse.newBuilder()
      .addAllUploadsList(uploadInfos)
      .build();
}
private GetDelegationTokenResponseProto getDelegationToken(
GetDelegationTokenRequestProto request) throws OMException {
GetDelegationTokenResponseProto.Builder rb =

View File

@ -16,10 +16,13 @@
* limitations under the License.
*
*/
package org.apache.hadoop.ozone.om;
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@ -31,6 +34,8 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
@ -49,6 +54,8 @@ public class TestKeyManagerUnit {
private OmMetadataManagerImpl metadataManager;
private KeyManagerImpl keyManager;
private Instant startDate;
@Before
public void setup() throws IOException {
OzoneConfiguration configuration = new OzoneConfiguration();
@ -62,6 +69,8 @@ public class TestKeyManagerUnit {
"omtest",
Mockito.mock(OzoneBlockTokenSecretManager.class)
);
startDate = Instant.now();
}
@Test
@ -80,6 +89,66 @@ public class TestKeyManagerUnit {
Assert.assertEquals(0,
omMultipartUploadListParts.getPartInfoList().size());
this.startDate = Instant.now();
}
/**
 * Lists the uploads of bucket1 with an empty prefix: only the two uploads
 * of that bucket must be returned (sorted by key), and the creation time
 * must be populated and not older than the test start.
 */
@Test
public void listMultipartUploads() throws IOException {

  //GIVEN
  createBucket(metadataManager, "vol1", "bucket1");
  createBucket(metadataManager, "vol1", "bucket2");

  // Return values are unused; only the side effect (the open upload
  // records) matters for the listing below.
  initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1");
  initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2");
  initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1");

  //WHEN
  OmMultipartUploadList omMultipartUploadList =
      keyManager.listMultipartUploads("vol1", "bucket1", "");

  //THEN
  List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(2, uploads.size());
  Assert.assertEquals("dir/key1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/key2", uploads.get(1).getKeyName());

  Assert.assertNotNull(uploads.get(1).getCreationTime());
  Assert.assertTrue("Creation date is too old",
      uploads.get(1).getCreationTime().compareTo(startDate) > 0);
}
/**
 * Lists uploads of bucket1 with prefix "dir": entries in other buckets,
 * with other prefixes ("dip/", "key3") must be excluded.
 */
@Test
public void listMultipartUploadsWithPrefix() throws IOException {

  //GIVEN
  createBucket(metadataManager, "vol1", "bucket1");
  createBucket(metadataManager, "vol1", "bucket2");

  // Return values are unused; the open upload records are the fixture.
  initMultipartUpload(keyManager, "vol1", "bucket1", "dip/key1");
  initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1");
  initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2");
  initMultipartUpload(keyManager, "vol1", "bucket1", "key3");
  initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1");

  //WHEN
  OmMultipartUploadList omMultipartUploadList =
      keyManager.listMultipartUploads("vol1", "bucket1", "dir");

  //THEN
  List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(2, uploads.size());
  Assert.assertEquals("dir/key1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/key2", uploads.get(1).getKeyName());
}
private void createBucket(OmMetadataManagerImpl omMetadataManager,
@ -108,4 +177,4 @@ public class TestKeyManagerUnit {
.build();
return omtest.initiateMultipartUpload(key1);
}
}
}

View File

@ -40,6 +40,7 @@ import java.util.Iterator;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata;
@ -53,7 +54,6 @@ import org.apache.hadoop.ozone.s3.util.S3StorageType;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.StringUtils;
import static org.apache.hadoop.ozone.s3.util.OzoneS3Util.getVolumeName;
import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
import org.apache.http.HttpStatus;
@ -88,6 +88,7 @@ public class BucketEndpoint extends EndpointBase {
@QueryParam("browser") String browser,
@QueryParam("continuation-token") String continueToken,
@QueryParam("start-after") String startAfter,
@QueryParam("uploads") String uploads,
@Context HttpHeaders hh) throws OS3Exception, IOException {
if (browser != null) {
@ -99,6 +100,10 @@ public class BucketEndpoint extends EndpointBase {
}
if (uploads != null) {
return listMultipartUploads(bucketName, prefix);
}
if (prefix == null) {
prefix = "";
}
@ -209,6 +214,29 @@ public class BucketEndpoint extends EndpointBase {
}
/**
 * AWS-compatible ListMultipartUploads endpoint: returns the bucket's
 * in-flight multipart uploads, optionally filtered by a key prefix.
 *
 * @param bucketName bucket to query
 * @param prefix     optional key-name prefix filter
 */
public Response listMultipartUploads(
    @PathParam("bucket") String bucketName,
    @QueryParam("prefix") String prefix)
    throws OS3Exception, IOException {

  OzoneBucket ozoneBucket = getBucket(bucketName);
  OzoneMultipartUploadList uploadList =
      ozoneBucket.listMultipartUploads(prefix);

  ListMultipartUploadsResult result = new ListMultipartUploadsResult();
  result.setBucket(bucketName);

  // Map each Ozone upload record to its S3 response representation.
  uploadList.getUploads().forEach(upload -> {
    S3StorageType storageType = S3StorageType.fromReplicationType(
        upload.getReplicationType(), upload.getReplicationFactor());
    result.addUpload(new ListMultipartUploadsResult.Upload(
        upload.getKeyName(),
        upload.getUploadId(),
        upload.getCreationTime(),
        storageType));
  });

  return Response.ok(result).build();
}
/**
* Rest endpoint to check the existence of a bucket.
* <p>

View File

@ -0,0 +1,268 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.endpoint;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;
import org.apache.hadoop.ozone.s3.util.S3StorageType;
/**
 * AWS compatible REST response for list multipart upload.
 *
 * Mirrors the {@code ListMultipartUploadsResult} XML document of the
 * AWS S3 REST API (2006-03-01 schema).
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlRootElement(name = "ListMultipartUploadsResult", namespace =
    "http://s3.amazonaws.com/doc/2006-03-01/")
public class ListMultipartUploadsResult {

  // Ozone does not track per-upload ownership, so a fixed placeholder
  // owner/initiator is reported to keep the response well-formed.
  public static final Owner
      NOT_SUPPORTED_OWNER = new Owner("NOT-SUPPORTED", "Not Supported");

  @XmlElement(name = "Bucket")
  private String bucket;

  @XmlElement(name = "KeyMarker")
  private String keyMarker;

  @XmlElement(name = "UploadIdMarker")
  private String uploadIdMarker;

  @XmlElement(name = "NextKeyMarker")
  private String nextKeyMarker;

  @XmlElement(name = "NextUploadIdMarker")
  private String nextUploadIdMarker;

  // S3 default page size for ListMultipartUploads.
  @XmlElement(name = "MaxUploads")
  private int maxUploads = 1000;

  @XmlElement(name = "IsTruncated")
  private boolean isTruncated = false;

  @XmlElement(name = "Upload")
  private List<Upload> uploads = new ArrayList<>();

  public String getBucket() {
    return bucket;
  }

  public void setBucket(String bucket) {
    this.bucket = bucket;
  }

  public String getKeyMarker() {
    return keyMarker;
  }

  public void setKeyMarker(String keyMarker) {
    this.keyMarker = keyMarker;
  }

  public String getUploadIdMarker() {
    return uploadIdMarker;
  }

  public void setUploadIdMarker(String uploadIdMarker) {
    this.uploadIdMarker = uploadIdMarker;
  }

  public String getNextKeyMarker() {
    return nextKeyMarker;
  }

  public void setNextKeyMarker(String nextKeyMarker) {
    this.nextKeyMarker = nextKeyMarker;
  }

  public String getNextUploadIdMarker() {
    return nextUploadIdMarker;
  }

  public void setNextUploadIdMarker(String nextUploadIdMarker) {
    this.nextUploadIdMarker = nextUploadIdMarker;
  }

  public int getMaxUploads() {
    return maxUploads;
  }

  public void setMaxUploads(int maxUploads) {
    this.maxUploads = maxUploads;
  }

  public boolean isTruncated() {
    return isTruncated;
  }

  public void setTruncated(boolean truncated) {
    this.isTruncated = truncated;
  }

  public List<Upload> getUploads() {
    return uploads;
  }

  public void setUploads(
      List<Upload> uploads) {
    this.uploads = uploads;
  }

  public void addUpload(Upload upload) {
    this.uploads.add(upload);
  }

  /**
   * Information about one in-flight multipart upload ({@code Upload}
   * element of the S3 response).
   */
  @XmlAccessorType(XmlAccessType.FIELD)
  @XmlRootElement(name = "Upload")
  public static class Upload {

    @XmlElement(name = "Key")
    private String key;

    @XmlElement(name = "UploadId")
    private String uploadId;

    @XmlElement(name = "Owner")
    private Owner owner = NOT_SUPPORTED_OWNER;

    @XmlElement(name = "Initiator")
    private Owner initiator = NOT_SUPPORTED_OWNER;

    @XmlElement(name = "StorageClass")
    private String storageClass = "STANDARD";

    @XmlJavaTypeAdapter(IsoDateAdapter.class)
    @XmlElement(name = "Initiated")
    private Instant initiated;

    public Upload() {
    }

    public Upload(String key, String uploadId, Instant initiated) {
      this.key = key;
      this.uploadId = uploadId;
      this.initiated = initiated;
    }

    public Upload(String key, String uploadId, Instant initiated,
        S3StorageType storageClass) {
      this.key = key;
      this.uploadId = uploadId;
      this.initiated = initiated;
      this.storageClass = storageClass.toString();
    }

    public String getKey() {
      return key;
    }

    public void setKey(String key) {
      this.key = key;
    }

    public String getUploadId() {
      return uploadId;
    }

    public void setUploadId(String uploadId) {
      this.uploadId = uploadId;
    }

    public Owner getOwner() {
      return owner;
    }

    public void setOwner(
        Owner owner) {
      this.owner = owner;
    }

    public Owner getInitiator() {
      return initiator;
    }

    public void setInitiator(
        Owner initiator) {
      this.initiator = initiator;
    }

    public String getStorageClass() {
      return storageClass;
    }

    public void setStorageClass(String storageClass) {
      this.storageClass = storageClass;
    }

    public Instant getInitiated() {
      return initiated;
    }

    public void setInitiated(Instant initiated) {
      this.initiated = initiated;
    }
  }

  /**
   * Owner information ({@code Owner}/{@code Initiator} elements of the S3
   * response).
   */
  @XmlAccessorType(XmlAccessType.FIELD)
  @XmlRootElement(name = "Owner")
  public static class Owner {

    @XmlElement(name = "ID")
    private String id;

    @XmlElement(name = "DisplayName")
    private String displayName;

    public Owner() {
    }

    public Owner(String id, String displayName) {
      this.id = id;
      this.displayName = displayName;
    }

    public String getId() {
      return id;
    }

    public void setId(String id) {
      this.id = id;
    }

    public String getDisplayName() {
      return displayName;
    }

    public void setDisplayName(String displayName) {
      this.displayName = displayName;
    }
  }
}

View File

@ -615,13 +615,9 @@ public class ObjectEndpoint extends EndpointBase {
listPartsResponse.setPartNumberMarker(partNumberMarker);
listPartsResponse.setTruncated(false);
if (ozoneMultipartUploadPartListParts.getReplicationType().toString()
.equals(ReplicationType.STAND_ALONE.toString())) {
listPartsResponse.setStorageClass(S3StorageType.REDUCED_REDUNDANCY
.toString());
} else {
listPartsResponse.setStorageClass(S3StorageType.STANDARD.toString());
}
listPartsResponse.setStorageClass(S3StorageType.fromReplicationType(
ozoneMultipartUploadPartListParts.getReplicationType(),
ozoneMultipartUploadPartListParts.getReplicationFactor()).toString());
if (ozoneMultipartUploadPartListParts.isTruncated()) {
listPartsResponse.setTruncated(

View File

@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.client.ReplicationType;
public enum S3StorageType {
REDUCED_REDUNDANCY(ReplicationType.STAND_ALONE, ReplicationFactor.ONE),
REDUCED_REDUNDANCY(ReplicationType.RATIS, ReplicationFactor.ONE),
STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE);
private final ReplicationType type;
@ -52,4 +52,13 @@ public enum S3StorageType {
return STANDARD;
}
/**
 * Maps an Ozone replication setting to the S3 storage class reported to
 * clients.
 *
 * @param replicationType replication type of the key/upload.
 * @param factor replication factor of the key/upload.
 * @return REDUCED_REDUNDANCY for STAND_ALONE or single-replica data,
 *         STANDARD otherwise.
 */
public static S3StorageType fromReplicationType(
    ReplicationType replicationType, ReplicationFactor factor) {
  // Single-copy data (STAND_ALONE type or factor ONE) is the cheaper
  // reduced-redundancy class; everything else is reported as STANDARD.
  boolean reducedRedundancy =
      replicationType == ReplicationType.STAND_ALONE
          || factor == ReplicationFactor.ONE;
  return reducedRedundancy
      ? S3StorageType.REDUCED_REDUNDANCY
      : S3StorageType.STANDARD;
}
}

View File

@ -252,8 +252,8 @@ public class OzoneBucketStub extends OzoneBucket {
List<PartInfo> partInfoList = new ArrayList<>();
if (partList.get(key) == null) {
return new OzoneMultipartUploadPartListParts(ReplicationType.STAND_ALONE,
0, false);
return new OzoneMultipartUploadPartListParts(ReplicationType.RATIS,
ReplicationFactor.ONE, 0, false);
} else {
Map<Integer, Part> partMap = partList.get(key);
Iterator<Map.Entry<Integer, Part>> partIterator =
@ -282,7 +282,8 @@ public class OzoneBucketStub extends OzoneBucket {
}
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
new OzoneMultipartUploadPartListParts(ReplicationType.STAND_ALONE,
new OzoneMultipartUploadPartListParts(ReplicationType.RATIS,
ReplicationFactor.ONE,
nextPartNumberMarker, truncated);
ozoneMultipartUploadPartListParts.addAllParts(partInfoList);

View File

@ -27,9 +27,8 @@ import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.fail;
import org.junit.Test;
/**
* Testing basic object list browsing.
@ -47,7 +46,7 @@ public class TestBucketGet {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket
.list("b1", "/", null, null, 100, "", null, null, null, null)
.list("b1", "/", null, null, 100, "", null, null, null, null, null)
.getEntity();
Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
@ -71,7 +70,7 @@ public class TestBucketGet {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
"dir1", null, null, null, null).getEntity();
"dir1", null, null, null, null, null).getEntity();
Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
Assert.assertEquals("dir1/",
@ -95,7 +94,7 @@ public class TestBucketGet {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket
.list("b1", "/", null, null, 100, "dir1/", null, null,
null, null)
null, null, null)
.getEntity();
Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
@ -122,7 +121,7 @@ public class TestBucketGet {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
"dir1", null, null, null, null).getEntity();
"dir1", null, null, null, null, null).getEntity();
Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size());
@ -141,7 +140,7 @@ public class TestBucketGet {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
"", null, null, null, null).getEntity();
"", null, null, null, null, null).getEntity();
Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size());
Assert.assertEquals("file2", getBucketResponse.getContents().get(0)
@ -162,7 +161,7 @@ public class TestBucketGet {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
"dir1bh", null, null, "dir1/dir2/file2", null).getEntity();
"dir1bh", null, null, "dir1/dir2/file2", null, null).getEntity();
Assert.assertEquals(2, getBucketResponse.getCommonPrefixes().size());
@ -185,7 +184,7 @@ public class TestBucketGet {
// First time
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
"", null, null, null, null).getEntity();
"", null, null, null, null, null).getEntity();
Assert.assertTrue(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getContents().size() == 2);
@ -194,7 +193,7 @@ public class TestBucketGet {
String continueToken = getBucketResponse.getNextToken();
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
"", null, continueToken, null, null).getEntity();
"", null, continueToken, null, null, null).getEntity();
Assert.assertTrue(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getContents().size() == 2);
@ -204,7 +203,7 @@ public class TestBucketGet {
//3rd time
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
"", null, continueToken, null, null).getEntity();
"", null, continueToken, null, null, null).getEntity();
Assert.assertFalse(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getContents().size() == 1);
@ -236,7 +235,7 @@ public class TestBucketGet {
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
"test/", null, null, null, null).getEntity();
"test/", null, null, null, null, null).getEntity();
Assert.assertEquals(0, getBucketResponse.getContents().size());
Assert.assertEquals(2, getBucketResponse.getCommonPrefixes().size());
@ -247,7 +246,7 @@ public class TestBucketGet {
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
"test/", null, getBucketResponse.getNextToken(), null, null)
"test/", null, getBucketResponse.getNextToken(), null, null, null)
.getEntity();
Assert.assertEquals(1, getBucketResponse.getContents().size());
Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
@ -279,7 +278,7 @@ public class TestBucketGet {
// First time
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
"dir", null, null, null, null).getEntity();
"dir", null, null, null, null, null).getEntity();
Assert.assertTrue(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2);
@ -288,7 +287,7 @@ public class TestBucketGet {
String continueToken = getBucketResponse.getNextToken();
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
"dir", null, continueToken, null, null).getEntity();
"dir", null, continueToken, null, null, null).getEntity();
Assert.assertTrue(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2);
@ -296,7 +295,7 @@ public class TestBucketGet {
continueToken = getBucketResponse.getNextToken();
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
"dir", null, continueToken, null, null).getEntity();
"dir", null, continueToken, null, null, null).getEntity();
Assert.assertFalse(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 1);
@ -317,7 +316,7 @@ public class TestBucketGet {
try {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", "/", null, null, 2,
"dir", null, "random", null, null).getEntity();
"dir", null, "random", null, null, null).getEntity();
fail("listWithContinuationTokenFail");
} catch (OS3Exception ex) {
Assert.assertEquals("random", ex.getResource());
@ -339,7 +338,7 @@ public class TestBucketGet {
ListObjectResponse getBucketResponse =
(ListObjectResponse) getBucket.list("b1", null, null, null, 1000,
null, null, null, null, null).getEntity();
null, null, null, null, null, null).getEntity();
Assert.assertFalse(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getContents().size() == 5);
@ -350,14 +349,14 @@ public class TestBucketGet {
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", null, null, null,
1000, null, null, null, startAfter, null).getEntity();
1000, null, null, null, startAfter, null, null).getEntity();
Assert.assertFalse(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getContents().size() == 4);
getBucketResponse =
(ListObjectResponse) getBucket.list("b1", null, null, null,
1000, null, null, null, "random", null).getEntity();
1000, null, null, null, "random", null, null).getEntity();
Assert.assertFalse(getBucketResponse.isTruncated());
Assert.assertTrue(getBucketResponse.getContents().size() == 0);