HDDS-798. Storage-class is showing incorrectly. Contributed by Bharat Viswanadham.
commit 3d5cc1138a
parent 349168c4b3
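The gist of the change: the S3 gateway's bucket listing used to hard-code every key's storage class to "STANDARD"; it now derives the storage class from the key's replication type, which is threaded through OzoneKey, OzoneKeyDetails, the REST/RPC clients, and the two KeyInfo response classes. A minimal sketch of that mapping, using the ReplicationType and S3StorageType enums referenced in the diff below (the helper class itself is hypothetical, not part of the patch):

    // Hypothetical standalone helper illustrating the mapping the patch
    // inlines into BucketEndpoint: STAND_ALONE keys are reported as
    // REDUCED_REDUNDANCY, any other replication type as STANDARD.
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.s3.util.S3StorageType;

    public final class StorageClassMapping {

      private StorageClassMapping() {
      }

      public static String storageClassOf(ReplicationType type) {
        if (type == ReplicationType.STAND_ALONE) {
          return S3StorageType.REDUCED_REDUNDANCY.toString();
        }
        return S3StorageType.STANDARD.toString();
      }
    }
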
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.client;
 
+import org.apache.hadoop.hdds.client.ReplicationType;
+
 /**
  * A class that encapsulates OzoneKey.
  */
@@ -48,19 +50,22 @@ public class OzoneKey {
    */
   private long modificationTime;
 
+  private ReplicationType replicationType;
+
   /**
    * Constructs OzoneKey from OmKeyInfo.
    *
    */
   public OzoneKey(String volumeName, String bucketName,
       String keyName, long size, long creationTime,
-      long modificationTime) {
+      long modificationTime, ReplicationType type) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.name = keyName;
     this.dataSize = size;
     this.creationTime = creationTime;
     this.modificationTime = modificationTime;
+    this.replicationType = type;
   }
 
   /**
@@ -116,4 +121,13 @@ public class OzoneKey {
   public long getModificationTime() {
     return modificationTime;
   }
+
+  /**
+   * Returns the replication type of the key.
+   *
+   * @return replicationType
+   */
+  public ReplicationType getReplicationType() {
+    return replicationType;
+  }
 }
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.client;
 
+import org.apache.hadoop.hdds.client.ReplicationType;
+
 import java.util.List;
 
 /**
@@ -35,9 +37,10 @@ public class OzoneKeyDetails extends OzoneKey {
    */
   public OzoneKeyDetails(String volumeName, String bucketName, String keyName,
       long size, long creationTime, long modificationTime,
-      List<OzoneKeyLocation> ozoneKeyLocations) {
+      List<OzoneKeyLocation> ozoneKeyLocations,
+      ReplicationType type) {
     super(volumeName, bucketName, keyName, size, creationTime,
-        modificationTime);
+        modificationTime, type);
     this.ozoneKeyLocations = ozoneKeyLocations;
   }
 
@@ -777,7 +777,8 @@ public class RestClient implements ClientProtocol {
           LOG.warn("Parse exception in getting creation time for volume", e);
         }
         return new OzoneKey(volumeName, bucketName, keyInfo.getKeyName(),
-            keyInfo.getSize(), creationTime, modificationTime);
+            keyInfo.getSize(), creationTime, modificationTime,
+            ReplicationType.valueOf(keyInfo.getType().toString()));
       }).collect(Collectors.toList());
     } catch (URISyntaxException e) {
       throw new IOException(e);
@@ -812,7 +813,8 @@ public class RestClient implements ClientProtocol {
           keyInfo.getSize(),
           HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()),
           HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()),
-          ozoneKeyLocations);
+          ozoneKeyLocations, ReplicationType.valueOf(
+              keyInfo.getType().toString()));
       EntityUtils.consume(response);
       return key;
     } catch (URISyntaxException | ParseException e) {
@@ -537,7 +537,8 @@ public class RpcClient implements ClientProtocol {
         key.getKeyName(),
         key.getDataSize(),
         key.getCreationTime(),
-        key.getModificationTime()))
+        key.getModificationTime(),
+        ReplicationType.valueOf(key.getType().toString())))
         .collect(Collectors.toList());
   }
 
@@ -559,13 +560,10 @@ public class RpcClient implements ClientProtocol {
     keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().forEach(
         (a) -> ozoneKeyLocations.add(new OzoneKeyLocation(a.getContainerID(),
             a.getLocalID(), a.getLength(), a.getOffset())));
-    return new OzoneKeyDetails(keyInfo.getVolumeName(),
-        keyInfo.getBucketName(),
-        keyInfo.getKeyName(),
-        keyInfo.getDataSize(),
-        keyInfo.getCreationTime(),
-        keyInfo.getModificationTime(),
-        ozoneKeyLocations);
+    return new OzoneKeyDetails(keyInfo.getVolumeName(), keyInfo.getBucketName(),
+        keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(),
+        keyInfo.getModificationTime(), ozoneKeyLocations, ReplicationType
+            .valueOf(keyInfo.getType().toString()));
   }
 
   @Override
@@ -24,6 +24,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.client.ReplicationType;
 
 /**
  * KeyInfo class is used used for parsing json response
@@ -40,6 +41,25 @@ public class KeyInfo implements Comparable<KeyInfo> {
   private String modifiedOn;
   private long size;
   private String keyName;
+  private ReplicationType type;
+
+  /**
+   * Return replication type of the key.
+   *
+   * @return replication type
+   */
+  public ReplicationType getType() {
+    return type;
+  }
+
+  /**
+   * Set replication type of the key.
+   *
+   * @param replicationType
+   */
+  public void setType(ReplicationType replicationType) {
+    this.type = replicationType;
+  }
 
   /**
    * When this key was created.
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
 import com.fasterxml.jackson.annotation.JsonAutoDetect;
@@ -75,6 +76,26 @@ public class KeyInfo implements Comparable<KeyInfo> {
 
   private String dataFileName;
 
+  private ReplicationType type;
+
+  /**
+   * Return replication type of the key.
+   *
+   * @return replication type
+   */
+  public ReplicationType getType() {
+    return type;
+  }
+
+  /**
+   * Set replication type of the key.
+   *
+   * @param replicationType
+   */
+  public void setType(ReplicationType replicationType) {
+    this.type = replicationType;
+  }
+
   /**
    * When this key was created.
    *
@@ -36,9 +36,13 @@ Delete file with multi delete
     Should contain         ${result}    multidelete/f1
     Should contain         ${result}    multidelete/f2
     Should contain         ${result}    multidelete/f3
+    Should contain         ${result}    REDUCED_REDUNDANCY
+    Should not contain     ${result}    STANDARD
     ${result} =    Execute AWSS3APICli    delete-objects --bucket ${BUCKET} --delete 'Objects=[{Key=multidelete/f1},{Key=multidelete/f2},{Key=multidelete/f4}]'
     Should not contain     ${result}    Error
     ${result} =    Execute AWSS3ApiCli    list-objects --bucket ${BUCKET} --prefix multidelete/
     Should not contain     ${result}    multidelete/f1
     Should not contain     ${result}    multidelete/f2
     Should contain         ${result}    multidelete/f3
+    Should contain         ${result}    REDUCED_REDUNDANCY
+    Should not contain     ${result}    STANDARD
@@ -37,6 +37,7 @@ import java.io.InputStream;
 import java.time.Instant;
 import java.util.Iterator;
 
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata;
@@ -48,6 +49,7 @@ import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.s3.util.S3StorageType;
 import org.apache.hadoop.ozone.s3.util.S3utils;
 import org.apache.http.HttpStatus;
 import org.slf4j.Logger;
@@ -296,7 +298,12 @@ public class BucketEndpoint extends EndpointBase {
       keyMetadata.setKey(next.getName());
       keyMetadata.setSize(next.getDataSize());
       keyMetadata.setETag("" + next.getModificationTime());
-      keyMetadata.setStorageClass("STANDARD");
+      if (next.getReplicationType().toString().equals(ReplicationType
+          .STAND_ALONE.toString())) {
+        keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString());
+      } else {
+        keyMetadata.setStorageClass(S3StorageType.STANDARD.toString());
+      }
       keyMetadata.setLastModified(Instant.ofEpochMilli(
           next.getModificationTime()));
       response.addKey(keyMetadata);
@@ -93,7 +93,7 @@ public class OzoneBucketStub extends OzoneBucket {
               size,
               System.currentTimeMillis(),
               System.currentTimeMillis(),
-              new ArrayList<>()
+              new ArrayList<>(), type
           ));
       super.close();
     }