HDFS-12259. Ozone: OzoneClient: Refactor move ozone client from hadoop-hdfs to hadoop-hdfs-client. Contributed by Nandakumar.

Xiaoyu Yao 2017-08-08 13:36:05 -07:00
parent b153dbb9b0
commit 43d38114e6
97 changed files with 991 additions and 463 deletions

View File

@@ -84,6 +84,11 @@ public class OzoneAcl {
return new OzoneAcl(aclType, parts[1], rights);
}
@Override
public String toString() {
return type + ":" + name + ":" + rights;
}
/**
* Returns a hash code value for the object. This method is
* supported for the benefit of hash tables.

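The new toString emits the same colon-separated form (type:name:rights) that the ACL parser above consumes. A sketch of the round trip; the READ_WRITE rights constant and the printed enum names are assumptions, not shown in this hunk:

OzoneAcl acl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, "bilbo",
    OzoneAcl.OzoneACLRights.READ_WRITE);
System.out.println(acl); // e.g. USER:bilbo:READ_WRITE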
View File

@@ -16,11 +16,12 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts.Versioning;
import java.util.List;

View File

@@ -15,12 +15,13 @@
* the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts.Versioning;
import org.apache.hadoop.ozone.io.OzoneInputStream;
import org.apache.hadoop.ozone.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import java.io.IOException;
import java.util.Iterator;
@@ -237,7 +238,7 @@ public interface OzoneClient {
* @throws IOException
*/
void createBucket(String volumeName, String bucketName,
OzoneConsts.Versioning versioning,
Versioning versioning,
StorageType storageType, OzoneAcl... acls)
throws IOException;

View File

@@ -16,9 +16,12 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.client.rest.OzoneRestClient;
import org.apache.hadoop.ozone.client.rpc.OzoneRpcClient;
import java.io.IOException;
@@ -34,6 +37,18 @@ public final class OzoneClientFactory {
private static Configuration configuration;
/**
* Returns an OzoneClient which will use RPC protocol to perform
* client operations.
*
* @return OzoneClient
* @throws IOException
*/
public static OzoneClient getClient() throws IOException {
//TODO: get client based on ozone.client.protocol
return new OzoneRpcClient(getConfiguration());
}
/**
* Returns an OzoneClient which will use RPC protocol to perform
* client operations.
@@ -42,7 +57,18 @@ public final class OzoneClientFactory {
* @throws IOException
*/
public static OzoneClient getRpcClient() throws IOException {
return new OzoneClientImpl(getConfiguration());
return new OzoneRpcClient(getConfiguration());
}
/**
* Returns an OzoneClient which will use REST protocol to perform
* client operations.
*
* @return OzoneClient
* @throws IOException
*/
public static OzoneClient getRestClient() throws IOException {
return new OzoneRestClient(getConfiguration());
}
/**

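A usage sketch of the factory (the caller code below is hypothetical, not part of this commit): the factory hides which wire protocol backs the returned handle, so a caller can switch between RPC and REST without touching the rest of its code.

import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class OzoneClientFactoryExample {
  public static void main(String[] args) throws Exception {
    // getClient() currently falls back to the RPC implementation (see the
    // TODO above); getRpcClient()/getRestClient() choose it explicitly.
    OzoneClient client = OzoneClientFactory.getRpcClient();
    client.createVolume("vol-example", "root");
  }
}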
View File

@@ -15,7 +15,7 @@
* the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client;
import com.google.common.base.Optional;
@@ -25,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpRequestBase;
@@ -189,8 +191,8 @@ public final class OzoneClientUtils {
throw new IllegalArgumentException(
ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY +
" must be defined. See" +
" https://wiki.apache.org/hadoop/Ozone#Configuration for details" +
" on configuring Ozone.");
" https://wiki.apache.org/hadoop/Ozone#Configuration" +
" for details on configuring Ozone.");
}
}

View File

@@ -16,10 +16,10 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
import java.util.List;

View File

@@ -16,10 +16,10 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.ksm.helpers.KsmOzoneAclMap;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmOzoneAclMap;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
/**
* A class that encapsulates OzoneVolume.

View File

@@ -15,14 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.storage;
package org.apache.hadoop.ozone.client.io;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;

View File

@@ -15,18 +15,20 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.storage;
package org.apache.hadoop.ozone.client.io;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Result;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.storage.ChunkOutputStream;
import org.apache.hadoop.scm.storage.ContainerProtocolCalls;
import org.slf4j.Logger;

View File

@@ -0,0 +1,49 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.io;
import java.io.FilterInputStream;
import java.io.InputStream;
/**
* An input stream with length.
*/
public class LengthInputStream extends FilterInputStream {
private final long length;
/**
* Create a stream.
* @param in the underlying input stream.
* @param length the length of the stream.
*/
public LengthInputStream(InputStream in, long length) {
super(in);
this.length = length;
}
/** @return the length. */
public long getLength() {
return length;
}
public InputStream getWrappedStream() {
return in;
}
}
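A minimal usage sketch of the new class (values are illustrative): the wrapper simply carries a stream's known length next to the stream itself, so callers can report content length without reading ahead.

import java.io.ByteArrayInputStream;
import org.apache.hadoop.ozone.client.io.LengthInputStream;

public class LengthInputStreamExample {
  public static void main(String[] args) throws Exception {
    byte[] data = "ozone".getBytes("UTF-8");
    LengthInputStream in =
        new LengthInputStream(new ByteArrayInputStream(data), data.length);
    System.out.println(in.getLength()); // 5, known without consuming the stream
    System.out.println(in.read());      // reads delegate to the wrapped stream
  }
}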

View File

@@ -0,0 +1,51 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.io;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.KeyData;
/**
* This class contains methods that define the translation between the Ozone
* domain model and the storage container domain model.
*/
final class OzoneContainerTranslation {
/**
* Creates key data intended for reading a container key.
*
* @param containerName container name
* @param containerKey container key
* @return KeyData intended for reading the container key
*/
public static KeyData containerKeyDataForRead(String containerName,
String containerKey) {
return KeyData
.newBuilder()
.setContainerName(containerName)
.setName(containerKey)
.build();
}
/**
* There is no need to instantiate this class.
*/
private OzoneContainerTranslation() {
}
}
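Since the class is package-private, call sites sit in the same org.apache.hadoop.ozone.client.io package; a sketch with made-up container and key names:

// Hypothetical call site inside org.apache.hadoop.ozone.client.io:
KeyData request = OzoneContainerTranslation
    .containerKeyDataForRead("container-1", "volume/bucket/key");
// Only the two fields needed to address the key are populated:
// request.getContainerName() -> "container-1"
// request.getName()          -> "volume/bucket/key"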

View File

@@ -15,9 +15,8 @@
* the License.
*/
package org.apache.hadoop.ozone.io;
package org.apache.hadoop.ozone.client.io;
import org.apache.hadoop.ozone.web.storage.ChunkGroupInputStream;
import org.apache.hadoop.scm.storage.ChunkInputStream;
import java.io.IOException;

View File

@@ -15,9 +15,7 @@
* the License.
*/
package org.apache.hadoop.ozone.io;
import org.apache.hadoop.ozone.web.storage.ChunkGroupOutputStream;
package org.apache.hadoop.ozone.client.io;
import java.io.IOException;
import java.io.OutputStream;

View File

@@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.io;
package org.apache.hadoop.ozone.client.io;
/**
* This package contains Ozone I/O classes.

View File

@@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
/**
* This package contains Ozone Client classes.
*/

View File

@@ -0,0 +1,510 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ozone.client.rest;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneConsts.Versioning;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.HttpHeaders;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import static java.net.HttpURLConnection.HTTP_CREATED;
import static java.net.HttpURLConnection.HTTP_OK;
/**
* Ozone REST Client Implementation. It connects to the Ozone Handler to
* execute client calls, using the REST protocol to communicate with the server.
*/
public class OzoneRestClient implements OzoneClient, Closeable {
private static final Logger LOG =
LoggerFactory.getLogger(OzoneRestClient.class);
private static final String SCHEMA = "http://";
private static final int DEFAULT_OZONE_PORT = 50070;
private final URI uri;
private final UserGroupInformation ugi;
private final OzoneAcl.OzoneACLRights userRights;
private final OzoneAcl.OzoneACLRights groupRights;
/**
* Creates OzoneRestClient instance with new OzoneConfiguration.
*
* @throws IOException
*/
public OzoneRestClient() throws IOException, URISyntaxException {
this(new OzoneConfiguration());
}
/**
* Creates OzoneRestClient instance with the given configuration.
*
* @param conf
*
* @throws IOException
*/
public OzoneRestClient(Configuration conf)
throws IOException {
Preconditions.checkNotNull(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
this.groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS,
KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT);
//TODO: get uri from property ozone.rest.servers
URIBuilder ozoneURI = null;
try {
ozoneURI = new URIBuilder(SCHEMA + "localhost");
if (ozoneURI.getPort() == -1) {
ozoneURI.setPort(DEFAULT_OZONE_PORT);
}
uri = ozoneURI.build();
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void createVolume(String volumeName)
throws IOException {
createVolume(volumeName, ugi.getUserName());
}
@Override
public void createVolume(String volumeName, String owner)
throws IOException {
createVolume(volumeName, owner, OzoneConsts.MAX_QUOTA_IN_BYTES,
(OzoneAcl[])null);
}
@Override
public void createVolume(String volumeName, String owner,
OzoneAcl... acls)
throws IOException {
createVolume(volumeName, owner, OzoneConsts.MAX_QUOTA_IN_BYTES, acls);
}
@Override
public void createVolume(String volumeName, String owner,
long quota)
throws IOException {
createVolume(volumeName, owner, quota, (OzoneAcl[])null);
}
@Override
public void createVolume(String volumeName, String owner,
long quota, OzoneAcl... acls)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(owner);
Preconditions.checkNotNull(quota);
Preconditions.checkState(quota >= 0);
Set<OzoneAcl> aclSet = new HashSet<>();
if(acls != null) {
aclSet.addAll(Arrays.asList(acls));
}
LOG.info("Creating Volume: {}, with {} as owner and " +
"quota set to {} bytes.", volumeName, owner, quota);
HttpPost httpPost = null;
HttpEntity entity = null;
try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) {
URIBuilder builder = new URIBuilder(uri);
builder.setPath("/" + volumeName);
String quotaString = quota + Header.OZONE_QUOTA_BYTES;
builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quotaString);
httpPost = getHttpPost(owner, builder.build().toString());
for (OzoneAcl acl : aclSet) {
httpPost.addHeader(
Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString());
}
HttpResponse response = httpClient.execute(httpPost);
entity = response.getEntity();
int errorCode = response.getStatusLine().getStatusCode();
if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
return;
}
if (entity != null) {
throw new IOException(EntityUtils.toString(entity));
} else {
throw new IOException("Unexpected null in http payload");
}
} catch (URISyntaxException | IllegalArgumentException ex) {
throw new IOException(ex.getMessage());
} finally {
EntityUtils.consume(entity);
OzoneClientUtils.releaseConnection(httpPost);
}
}
@Override
public void setVolumeOwner(String volumeName, String owner)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(owner);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void setVolumeQuota(String volumeName, long quota)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(quota);
Preconditions.checkState(quota >= 0);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public OzoneVolume getVolumeDetails(String volumeName)
throws IOException {
Preconditions.checkNotNull(volumeName);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public boolean checkVolumeAccess(String volumeName, OzoneAcl acl)
throws IOException {
Preconditions.checkNotNull(volumeName);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void deleteVolume(String volumeName)
throws IOException {
Preconditions.checkNotNull(volumeName);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public Iterator<OzoneVolume> listVolumes(String volumePrefix)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public Iterator<OzoneVolume> listVolumes(String volumePrefix,
String user)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void createBucket(String volumeName, String bucketName)
throws IOException {
createBucket(volumeName, bucketName, Versioning.NOT_DEFINED,
StorageType.DEFAULT, (OzoneAcl[])null);
}
@Override
public void createBucket(String volumeName, String bucketName,
Versioning versioning)
throws IOException {
createBucket(volumeName, bucketName, versioning,
StorageType.DEFAULT, (OzoneAcl[])null);
}
@Override
public void createBucket(String volumeName, String bucketName,
StorageType storageType)
throws IOException {
createBucket(volumeName, bucketName, Versioning.NOT_DEFINED,
storageType, (OzoneAcl[])null);
}
@Override
public void createBucket(String volumeName, String bucketName,
OzoneAcl... acls)
throws IOException {
createBucket(volumeName, bucketName, Versioning.NOT_DEFINED,
StorageType.DEFAULT, acls);
}
@Override
public void createBucket(String volumeName, String bucketName,
Versioning versioning, StorageType storageType,
OzoneAcl... acls)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(versioning);
Preconditions.checkNotNull(storageType);
String owner = ugi.getUserName();
final List<OzoneAcl> listOfAcls = new ArrayList<>();
//User ACL
OzoneAcl userAcl =
new OzoneAcl(OzoneAcl.OzoneACLType.USER,
owner, userRights);
listOfAcls.add(userAcl);
//Group ACLs of the User
List<String> userGroups = Arrays.asList(UserGroupInformation
.createRemoteUser(owner).getGroupNames());
userGroups.stream().forEach((group) -> listOfAcls.add(
new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights)));
//ACLs passed as argument
if(acls != null) {
Arrays.stream(acls).forEach((acl) -> listOfAcls.add(acl));
}
LOG.info("Creating Bucket: {}/{}, with Versioning {} and " +
"Storage Type set to {}", volumeName, bucketName, versioning,
storageType);
throw new UnsupportedOperationException("Not yet implemented.");
}
/**
* Converts OzoneConsts.Versioning enum to boolean.
*
* @param version
* @return corresponding boolean value
*/
private boolean getBucketVersioningProtobuf(
Versioning version) {
if(version != null) {
switch(version) {
case ENABLED:
return true;
case NOT_DEFINED:
case DISABLED:
default:
return false;
}
}
return false;
}
@Override
public void addBucketAcls(String volumeName, String bucketName,
List<OzoneAcl> addAcls)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(addAcls);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void removeBucketAcls(String volumeName, String bucketName,
List<OzoneAcl> removeAcls)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(removeAcls);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void setBucketVersioning(String volumeName, String bucketName,
Versioning versioning)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(versioning);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void setBucketStorageType(String volumeName, String bucketName,
StorageType storageType)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(storageType);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void deleteBucket(String volumeName, String bucketName)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void checkBucketAccess(String volumeName, String bucketName)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public OzoneBucket getBucketDetails(String volumeName,
String bucketName)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public Iterator<OzoneBucket> listBuckets(String volumeName,
String bucketPrefix)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public OzoneOutputStream createKey(String volumeName, String bucketName,
String keyName, long size)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public OzoneInputStream getKey(String volumeName, String bucketName,
String keyName)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(keyName);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public void deleteKey(String volumeName, String bucketName,
String keyName)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(keyName);
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public List<OzoneKey> listKeys(String volumeName, String bucketName,
String keyPrefix)
throws IOException {
throw new UnsupportedOperationException("Not yet implemented.");
}
@Override
public OzoneKey getKeyDetails(String volumeName, String bucketName,
String keyName)
throws IOException {
Preconditions.checkNotNull(volumeName);
Preconditions.checkNotNull(bucketName);
Preconditions.checkNotNull(keyName);
throw new UnsupportedOperationException("Not yet implemented.");
}
/**
* Converts Versioning to boolean.
*
* @param version
* @return corresponding boolean value
*/
private boolean getBucketVersioningFlag(
Versioning version) {
if(version != null) {
switch(version) {
case ENABLED:
return true;
case DISABLED:
case NOT_DEFINED:
default:
return false;
}
}
return false;
}
/**
* Returns a standard HttpPost Object to use for ozone post requests.
*
* @param user - If the call is being made on behalf of a user, that user
* @param uriString - UriString
* @return HttpPost
*/
public HttpPost getHttpPost(String user, String uriString) {
HttpPost httpPost = new HttpPost(uriString);
addOzoneHeaders(httpPost);
if (user != null) {
httpPost.addHeader(Header.OZONE_USER, user);
}
return httpPost;
}
/**
* Add Ozone Headers.
*
* @param httpRequest - Http Request
*/
private void addOzoneHeaders(HttpRequestBase httpRequest) {
SimpleDateFormat format =
new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
httpRequest.addHeader(Header.OZONE_VERSION_HEADER,
Header.OZONE_V1_VERSION_HEADER);
httpRequest.addHeader(HttpHeaders.DATE,
format.format(new Date(Time.monotonicNow())));
httpRequest.addHeader(HttpHeaders.AUTHORIZATION,
Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
ugi.getUserName());
}
@Override
public void close() throws IOException {
}
}
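A usage sketch for the REST client (volume and owner names are illustrative). At this point in the commit only createVolume issues a real request; most other operations still throw UnsupportedOperationException.

OzoneClient client = OzoneClientFactory.getRestClient();
// Sends an HTTP POST to http://localhost:50070/vol-example: the quota is
// encoded as a query parameter, while the version, date, authorization,
// user, and ACL values travel as request headers built by getHttpPost()
// and addOzoneHeaders() above.
client.createVolume("vol-example", "root");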

View File

@@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.headers;
package org.apache.hadoop.ozone.client.rest.headers;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -21,6 +21,6 @@
* Ozone HTTP header definitions.
*/
@InterfaceAudience.Private
package org.apache.hadoop.ozone.web.headers;
package org.apache.hadoop.ozone.client.rest.headers;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rest;
/**
* This package contains Ozone rest client library classes.
*/

View File

@@ -15,33 +15,41 @@
* the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client.rpc;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ksm.protocolPB
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.protocolPB
.KeySpaceManagerProtocolClientSideTranslatorPB;
import org.apache.hadoop.ksm.protocolPB
import org.apache.hadoop.ozone.ksm.protocolPB
.KeySpaceManagerProtocolPB;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.web.storage.ChunkGroupInputStream;
import org.apache.hadoop.ozone.io.OzoneInputStream;
import org.apache.hadoop.ozone.io.OzoneOutputStream;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts.Versioning;
import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
import org.apache.hadoop.ozone.web.storage.ChunkGroupOutputStream;
import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.apache.hadoop.scm.XceiverClientManager;
import org.apache.hadoop.scm.protocolPB
@@ -63,14 +71,14 @@ import java.util.UUID;
import java.util.stream.Collectors;
/**
* Ozone Client Implementation, it connects to KSM, SCM and DataNode
* Ozone RPC Client Implementation, it connects to KSM, SCM and DataNode
* to execute client calls. This uses RPC protocol for communication
* with the servers.
*/
public class OzoneClientImpl implements OzoneClient, Closeable {
public class OzoneRpcClient implements OzoneClient, Closeable {
private static final Logger LOG =
LoggerFactory.getLogger(OzoneClient.class);
LoggerFactory.getLogger(OzoneRpcClient.class);
private final StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
@@ -85,22 +93,22 @@ public class OzoneClientImpl implements OzoneClient, Closeable {
private final OzoneAcl.OzoneACLRights groupRights;
/**
* Creates OzoneClientImpl instance with new OzoneConfiguration.
* Creates OzoneRpcClient instance with new OzoneConfiguration.
*
* @throws IOException
*/
public OzoneClientImpl() throws IOException {
public OzoneRpcClient() throws IOException {
this(new OzoneConfiguration());
}
/**
* Creates OzoneClientImpl instance with the given configuration.
* Creates OzoneRpcClient instance with the given configuration.
*
* @param conf
*
* @throws IOException
*/
public OzoneClientImpl(Configuration conf) throws IOException {
public OzoneRpcClient(Configuration conf) throws IOException {
Preconditions.checkNotNull(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,

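The RPC client is driven through the same OzoneClient interface; a sketch (names illustrative), the difference being that calls travel over Hadoop RPC to KSM, SCM and the DataNodes instead of over HTTP:

OzoneClient client = OzoneClientFactory.getRpcClient(); // OzoneRpcClient
client.createVolume("vol-example", "root");
client.createBucket("vol-example", "bucket-example");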
View File

@@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rpc;
/**
* This package contains Ozone rpc client library classes.
*/

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
import java.util.List;
import java.util.stream.Collectors;

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.StorageType;

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
/**
* Args for key. Clients use this to specify a key's attributes on key creation

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;

View File

@@ -14,7 +14,7 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocation;

View File

@@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.OzoneAclInfo;

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.protocol.proto

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;
import com.google.common.base.Preconditions;

View File

@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.helpers;
package org.apache.hadoop.ozone.ksm.helpers;

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm;
package org.apache.hadoop.ozone.ksm;
/**
This package contains the client-side protocol library to communicate with KSM.
*/

View File

@@ -15,13 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.protocol;
package org.apache.hadoop.ozone.ksm.protocol;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.OzoneAclInfo;
import java.io.IOException;

View File

@@ -16,4 +16,4 @@
* limitations under the License.
*/
package org.apache.hadoop.ksm.protocol;
package org.apache.hadoop.ozone.ksm.protocol;

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.protocolPB;
package org.apache.hadoop.ozone.ksm.protocolPB;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
@@ -24,12 +24,12 @@ import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ksm.protocol.KeySpaceManagerProtocol;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.BucketArgs;
import org.apache.hadoop.ozone.protocol.proto

View File

@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ksm.protocolPB;
package org.apache.hadoop.ozone.ksm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtocolInfo;

View File

@@ -16,4 +16,4 @@
* limitations under the License.
*/
package org.apache.hadoop.ksm.protocolPB;
package org.apache.hadoop.ozone.ksm.protocolPB;

View File

@@ -34,7 +34,7 @@ import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.cblock.protocolPB
.CBlockServiceProtocolServerSideTranslatorPB;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.XceiverClientManager;

View File

@@ -22,7 +22,7 @@ import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.security.UserGroupInformation;

View File

@@ -31,12 +31,13 @@ import com.sun.jersey.api.container.ContainerFactory;
import com.sun.jersey.api.core.ApplicationAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ksm.protocolPB
import org.apache.hadoop.ozone.ksm.protocolPB
.KeySpaceManagerProtocolClientSideTranslatorPB;
import org.apache.hadoop.ksm.protocolPB.KeySpaceManagerProtocolPB;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.protocolPB
.ScmBlockLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -47,7 +48,8 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;

View File

@@ -25,7 +25,7 @@ import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.HttpRequest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
import org.apache.hadoop.ozone.web.netty.RequestDispatchObjectStoreChannelHandler;

View File

@@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;

View File

@@ -20,7 +20,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CommandDispatcher;
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ContainerReportHandler;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;

View File

@@ -17,7 +17,7 @@
package org.apache.hadoop.ozone.container.common.statemachine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.protocol.VersionResponse;
import org.apache.hadoop.ozone.protocolPB
.StorageContainerDatanodeProtocolClientSideTranslatorPB;

View File

@@ -21,7 +21,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.protocolPB
.StorageContainerDatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;

View File

@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.container.common.states.datanode;
import com.google.common.base.Strings;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;

View File

@@ -16,8 +16,8 @@
*/
package org.apache.hadoop.ozone.ksm;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import java.io.IOException;
import java.util.List;

View File

@@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.ksm;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.BucketInfo;

View File

@@ -16,8 +16,8 @@
*/
package org.apache.hadoop.ozone.ksm;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import java.io.IOException;
import java.util.List;

View File

@@ -17,9 +17,9 @@
package org.apache.hadoop.ozone.ksm;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;

View File

@@ -24,17 +24,17 @@ import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.jmx.ServiceRuntimeInfo;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ksm.protocol.KeySpaceManagerProtocol;
import org.apache.hadoop.ksm.protocolPB.KeySpaceManagerProtocolPB;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.OzoneAclInfo;

View File

@@ -17,9 +17,9 @@
package org.apache.hadoop.ozone.ksm;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.utils.BatchOperation;
import org.apache.hadoop.utils.MetadataStore;

View File

@@ -21,9 +21,9 @@ import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;

View File

@@ -16,7 +16,7 @@
*/
package org.apache.hadoop.ozone.ksm;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.OzoneAclInfo;

View File

@@ -17,7 +17,7 @@
package org.apache.hadoop.ozone.ksm;
import com.google.common.base.Preconditions;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
import org.apache.hadoop.ozone.protocol.proto
@@ -34,10 +34,8 @@ import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import static org.apache.hadoop.ozone.ksm
.KSMConfigKeys.OZONE_KSM_USER_MAX_VOLUME_DEFAULT;
import static org.apache.hadoop.ozone.ksm
.KSMConfigKeys.OZONE_KSM_USER_MAX_VOLUME;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_USER_MAX_VOLUME_DEFAULT;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_USER_MAX_VOLUME;
import static org.apache.hadoop.ozone.ksm.exceptions
.KSMException.ResultCodes;

View File

@@ -19,13 +19,13 @@ package org.apache.hadoop.ozone.protocolPB;
import com.google.common.collect.Lists;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ksm.protocol.KeySpaceManagerProtocol;
import org.apache.hadoop.ksm.protocolPB.KeySpaceManagerProtocolPB;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.CreateBucketRequest;
@@ -89,8 +89,8 @@ import java.util.List;
/**
* This class is the server-side translator that forwards requests received on
* {@link org.apache.hadoop.ksm.protocolPB.KeySpaceManagerProtocolPB} to the
* KeySpaceManagerService server implementation.
* {@link org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB}
* to the KeySpaceManagerService server implementation.
*/
public class KeySpaceManagerProtocolServerSideTranslatorPB implements
KeySpaceManagerProtocolPB {

View File

@@ -28,7 +28,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;

View File

@@ -25,7 +25,7 @@ import org.apache.commons.collections.map.HashedMap;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
import org.apache.hadoop.ozone.protocol.VersionResponse;

View File

@@ -23,7 +23,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -21,10 +21,10 @@ package org.apache.hadoop.ozone.web.client;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.KeyInfo;

View File

@@ -23,9 +23,9 @@ import com.google.common.base.Strings;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.response.ListVolumes;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;

View File

@@ -21,9 +21,9 @@ package org.apache.hadoop.ozone.web.client;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.ListBuckets;

View File

@@ -22,7 +22,7 @@ package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.Bucket;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.OzoneConsts;

View File

@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.response.BucketInfo;

View File

@@ -19,10 +19,10 @@
package org.apache.hadoop.ozone.web.handlers;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.Keys;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.response.KeyInfo;

View File

@@ -22,7 +22,7 @@ import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;

View File

@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web.handlers;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.ext.Provider;

View File

@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.web.handlers;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.web.interfaces.Volume;

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.ozone.web.interfaces;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.ozone.web.interfaces;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.ozone.web.interfaces;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web.interfaces;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web.localstorage;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;
import org.apache.hadoop.ozone.web.handlers.KeyArgs;

View File

@ -20,9 +20,9 @@ package org.apache.hadoop.ozone.web.localstorage;
import com.google.common.base.Preconditions;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.BucketArgs;

View File

@ -18,8 +18,8 @@
package org.apache.hadoop.ozone.web.messages;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;

View File

@ -19,10 +19,8 @@
package org.apache.hadoop.ozone.web.ozShell.keys;
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.ozone.web.client.OzoneBucket;
import org.apache.hadoop.ozone.web.client.OzoneRestClientException;
import org.apache.hadoop.ozone.web.client.OzoneKey;
import org.apache.hadoop.ozone.web.client.OzoneVolume;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.ozShell.Handler;
import org.apache.hadoop.ozone.web.ozShell.Shell;

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web.request;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import com.fasterxml.jackson.annotation.JsonIgnore;
/**

View File

@ -19,20 +19,21 @@
package org.apache.hadoop.ozone.web.storage;
import com.google.common.base.Strings;
import org.apache.hadoop.hdfs.server.datanode.fsdataset
.LengthInputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ksm.protocolPB
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
import org.apache.hadoop.ozone.ksm.protocolPB
.KeySpaceManagerProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneConsts.Versioning;
import org.apache.hadoop.ozone.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;

View File

@ -1,261 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.storage;
import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.KeyData;
import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.KeyValue;
import org.apache.hadoop.ozone.OzoneConsts.Versioning;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.response.BucketInfo;
import org.apache.hadoop.ozone.web.response.KeyInfo;
import org.apache.hadoop.ozone.web.response.VolumeInfo;
import org.apache.hadoop.ozone.web.response.VolumeOwner;
import org.apache.hadoop.util.StringUtils;
/**
* This class contains methods that define the translation between the Ozone
* domain model and the storage container domain model.
*/
final class OzoneContainerTranslation {
private static final String ACLS = "ACLS";
private static final String BUCKET = "BUCKET";
private static final String BUCKET_NAME = "BUCKET_NAME";
private static final String CREATED_BY = "CREATED_BY";
private static final String CREATED_ON = "CREATED_ON";
private static final String KEY = "KEY";
private static final String OWNER = "OWNER";
private static final String QUOTA = "QUOTA";
private static final String STORAGE_TYPE = "STORAGE_TYPE";
private static final String TYPE = "TYPE";
private static final String VERSIONING = "VERSIONING";
private static final String VOLUME = "VOLUME";
private static final String VOLUME_NAME = "VOLUME_NAME";
/**
* Creates key data intended for reading a container key.
*
* @param containerName container name
* @param containerKey container key
* @return KeyData intended for reading the container key
*/
public static KeyData containerKeyDataForRead(String containerName,
String containerKey) {
return KeyData
.newBuilder()
.setContainerName(containerName)
.setName(containerKey)
.build();
}
/**
* Translates a bucket to its container representation.
*
* @param containerName container name
* @param containerKey container key
* @param bucket the bucket to translate
* @return KeyData representation of bucket
*/
public static KeyData fromBucketToContainerKeyData(
String containerName, String containerKey, BucketInfo bucket) {
KeyData.Builder containerKeyData = KeyData
.newBuilder()
.setContainerName(containerName)
.setName(containerKey)
.addMetadata(newKeyValue(TYPE, BUCKET))
.addMetadata(newKeyValue(VOLUME_NAME, bucket.getVolumeName()))
.addMetadata(newKeyValue(BUCKET_NAME, bucket.getBucketName()));
if (bucket.getAcls() != null) {
containerKeyData.addMetadata(newKeyValue(ACLS,
StringUtils.join(',', bucket.getAcls())));
}
if (bucket.getVersioning() != null &&
bucket.getVersioning() != Versioning.NOT_DEFINED) {
containerKeyData.addMetadata(newKeyValue(VERSIONING,
bucket.getVersioning().name()));
}
if (bucket.getStorageType() != StorageType.RAM_DISK) {
containerKeyData.addMetadata(newKeyValue(STORAGE_TYPE,
bucket.getStorageType().name()));
}
return containerKeyData.build();
}
/**
* Translates a bucket from its container representation.
*
* @param metadata container metadata representing the bucket
* @return bucket translated from container representation
*/
public static BucketInfo fromContainerKeyValueListToBucket(
List<KeyValue> metadata) {
BucketInfo bucket = new BucketInfo();
for (KeyValue keyValue : metadata) {
switch (keyValue.getKey()) {
case VOLUME_NAME:
bucket.setVolumeName(keyValue.getValue());
break;
case BUCKET_NAME:
bucket.setBucketName(keyValue.getValue());
break;
case VERSIONING:
bucket.setVersioning(
Enum.valueOf(Versioning.class, keyValue.getValue()));
break;
case STORAGE_TYPE:
bucket.setStorageType(
Enum.valueOf(StorageType.class, keyValue.getValue()));
break;
default:
break;
}
}
return bucket;
}
/**
* Translates a volume from its container representation.
*
* @param metadata container metadata representing the volume
* @return volume translated from container representation
*/
public static VolumeInfo fromContainerKeyValueListToVolume(
List<KeyValue> metadata) {
VolumeInfo volume = new VolumeInfo();
for (KeyValue keyValue : metadata) {
switch (keyValue.getKey()) {
case VOLUME_NAME:
volume.setVolumeName(keyValue.getValue());
break;
case CREATED_BY:
volume.setCreatedBy(keyValue.getValue());
break;
case CREATED_ON:
volume.setCreatedOn(keyValue.getValue());
break;
case OWNER:
volume.setOwner(new VolumeOwner(keyValue.getValue()));
break;
case QUOTA:
volume.setQuota(OzoneQuota.parseQuota(keyValue.getValue()));
break;
default:
break;
}
}
return volume;
}
/**
* Translates a key to its container representation.
*
* @param containerName container name
* @param containerKey container key
* @param key key information received from the call
* @return KeyData representation of the key
*/
public static KeyData fromKeyToContainerKeyData(String containerName,
String containerKey, KeyInfo key) {
return KeyData
.newBuilder()
.setContainerName(containerName)
.setName(containerKey)
.addMetadata(newKeyValue(TYPE, KEY))
.build();
}
/**
* Translates a key to its container representation. The return value is a
* builder that can be manipulated further before building the result.
*
* @param containerName container name
* @param containerKey container key
* @param key key information received from the call
* @return KeyData builder
*/
public static KeyData.Builder fromKeyToContainerKeyDataBuilder(
String containerName, String containerKey, KeyInfo key) {
return KeyData
.newBuilder()
.setContainerName(containerName)
.setName(containerKey)
.addMetadata(newKeyValue(TYPE, KEY));
}
/**
* Translates a volume to its container representation.
*
* @param containerName container name
* @param containerKey container key
* @param volume the volume to translate
* @return KeyData representation of volume
*/
public static KeyData fromVolumeToContainerKeyData(
String containerName, String containerKey, VolumeInfo volume) {
KeyData.Builder containerKeyData = KeyData
.newBuilder()
.setContainerName(containerName)
.setName(containerKey)
.addMetadata(newKeyValue(TYPE, VOLUME))
.addMetadata(newKeyValue(VOLUME_NAME, volume.getVolumeName()))
.addMetadata(newKeyValue(CREATED_ON, volume.getCreatedOn()));
if (volume.getQuota() != null && volume.getQuota().sizeInBytes() != -1L) {
containerKeyData.addMetadata(newKeyValue(QUOTA,
OzoneQuota.formatQuota(volume.getQuota())));
}
if (volume.getOwner() != null && volume.getOwner().getName() != null &&
!volume.getOwner().getName().isEmpty()) {
containerKeyData.addMetadata(newKeyValue(OWNER,
volume.getOwner().getName()));
}
if (volume.getCreatedBy() != null && !volume.getCreatedBy().isEmpty()) {
containerKeyData.addMetadata(
newKeyValue(CREATED_BY, volume.getCreatedBy()));
}
return containerKeyData.build();
}
/**
* Translates a key-value pair to its container representation.
*
* @param key the key
* @param value the value
* @return container representation of key-value pair
*/
private static KeyValue newKeyValue(String key, Object value) {
return KeyValue.newBuilder().setKey(key).setValue(value.toString()).build();
}
/**
* There is no need to instantiate this class.
*/
private OzoneContainerTranslation() {
}
}
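
Since this change deletes OzoneContainerTranslation outright, a quick sketch of the round trip its callers lose may help reviewers. This is illustrative only: Versioning.ENABLED and StorageType.DISK are example values, getMetadataList() is assumed to be the generated protobuf accessor for the repeated metadata field, and the class is package-private, so call sites lived inside org.apache.hadoop.ozone.web.storage.

    // Sketch: flatten a bucket into container KeyData metadata, then rebuild it.
    BucketInfo bucket = new BucketInfo();
    bucket.setVolumeName("vol1");
    bucket.setBucketName("bucket1");
    bucket.setVersioning(Versioning.ENABLED);   // illustrative value
    bucket.setStorageType(StorageType.DISK);    // illustrative value

    KeyData keyData = OzoneContainerTranslation.fromBucketToContainerKeyData(
        "container1", "vol1/bucket1", bucket);

    // The metadata list now carries TYPE, VOLUME_NAME, BUCKET_NAME,
    // VERSIONING and STORAGE_TYPE entries; the reverse call restores them.
    BucketInfo restored = OzoneContainerTranslation
        .fromContainerKeyValueListToBucket(keyData.getMetadataList());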

View File

@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.web.interfaces.UserAuth;
import org.apache.hadoop.ozone.OzoneConsts;

View File

@ -23,11 +23,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.handlers.UserArgs;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.util.Time;
import javax.ws.rs.core.HttpHeaders;

View File

@ -26,7 +26,9 @@ import org.apache.commons.cli.Options;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ozone.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@ -132,7 +134,7 @@ public final class Corona extends Configured implements Tool {
numberOfBucketsCreated = new AtomicInteger();
numberOfKeysAdded = new AtomicLong();
OzoneClientFactory.setConfiguration(conf);
ozoneClient = OzoneClientFactory.getRpcClient();
ozoneClient = OzoneClientFactory.getClient();
}
@Override

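For context, a minimal sketch of the factory pattern Corona now follows: configure the factory once, then ask it for a client. getClient() currently just returns the RPC client (protocol selection via ozone.client.protocol is still a TODO), and the createVolume/createBucket/createKey signatures below are illustrative readings of the client interface, not quotations from this change.

    OzoneConfiguration conf = new OzoneConfiguration();
    OzoneClientFactory.setConfiguration(conf);
    OzoneClient client = OzoneClientFactory.getClient();

    byte[] data = "sample".getBytes(StandardCharsets.UTF_8);
    client.createVolume("vol1");
    client.createBucket("vol1", "bucket1");
    // createKey is assumed to take the expected size and return an
    // OzoneOutputStream, the same stream type Corona writes through.
    try (OzoneOutputStream out =
             client.createKey("vol1", "bucket1", "key1", data.length)) {
      out.write(data);
    }
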
View File

@ -16,9 +16,10 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.scm.ScmConfigKeys;
import org.junit.Test;
@ -106,24 +107,30 @@ public class TestOzoneClientUtils {
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
InetSocketAddress addr = OzoneClientUtils.getScmAddressForDataNodes(conf);
assertThat(addr.getHostString(), is("1.2.3.4"));
assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
assertThat(addr.getPort(), is(
ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
// Next try a client address with just a host name and port. Verify the port
// is ignored and the default DataNode port is used.
// Next try a client address with just a host name and port.
// Verify the port is ignored and the default DataNode port is used.
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
addr = OzoneClientUtils.getScmAddressForDataNodes(conf);
assertThat(addr.getHostString(), is("1.2.3.4"));
assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
assertThat(addr.getPort(), is(
ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
// Set both OZONE_SCM_CLIENT_ADDRESS_KEY and OZONE_SCM_DATANODE_ADDRESS_KEY.
// Verify that the latter overrides and the port number is still the default.
// Set both OZONE_SCM_CLIENT_ADDRESS_KEY and
// OZONE_SCM_DATANODE_ADDRESS_KEY.
// Verify that the latter overrides and the port number is still the
// default.
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8");
addr = OzoneClientUtils.getScmAddressForDataNodes(conf);
assertThat(addr.getHostString(), is("5.6.7.8"));
assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
assertThat(addr.getPort(), is(
ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
// Set both OZONE_SCM_CLIENT_ADDRESS_KEY and OZONE_SCM_DATANODE_ADDRESS_KEY.
// Set both OZONE_SCM_CLIENT_ADDRESS_KEY and
// OZONE_SCM_DATANODE_ADDRESS_KEY.
// Verify that the latter overrides and the port number from the latter is
// used.
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
@ -165,7 +172,8 @@ public class TestOzoneClientUtils {
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8");
addr = OzoneClientUtils.getScmClientBindAddress(conf);
assertThat(addr.getHostString(), is("5.6.7.8"));
assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
assertThat(addr.getPort(), is(
ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
// OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected.
// Port number from OZONE_SCM_CLIENT_ADDRESS_KEY should be
@ -191,7 +199,8 @@ public class TestOzoneClientUtils {
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
InetSocketAddress addr = OzoneClientUtils.getScmDataNodeBindAddress(conf);
assertThat(addr.getHostString(), is("0.0.0.0"));
assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
assertThat(addr.getPort(), is(
ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
// The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY
// is set differently. The port number from OZONE_SCM_DATANODE_ADDRESS_KEY
@ -210,7 +219,8 @@ public class TestOzoneClientUtils {
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8");
addr = OzoneClientUtils.getScmDataNodeBindAddress(conf);
assertThat(addr.getHostString(), is("5.6.7.8"));
assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
assertThat(addr.getPort(), is(
ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
// OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected.
// Port number from OZONE_SCM_DATANODE_ADDRESS_KEY should be
@ -261,7 +271,8 @@ public class TestOzoneClientUtils {
hostsAndPorts.put("scm3", 3456);
// Verify multiple hosts and ports
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234,scm2:2345,scm3:3456");
conf.setStrings(
ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234,scm2:2345,scm3:3456");
addresses = OzoneClientUtils.getSCMAddresses(conf);
assertThat(addresses.size(), is(3));
it = addresses.iterator();
@ -274,7 +285,8 @@ public class TestOzoneClientUtils {
assertTrue(expected1.isEmpty());
// Verify names with spaces
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, " scm1:1234, scm2:2345 , scm3:3456 ");
conf.setStrings(
ScmConfigKeys.OZONE_SCM_NAMES, " scm1:1234, scm2:2345 , scm3:3456 ");
addresses = OzoneClientUtils.getSCMAddresses(conf);
assertThat(addresses.size(), is(3));
it = addresses.iterator();

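The assertions above encode one precedence rule that is easy to lose in the reflowed comments, so a compact restatement (the addresses are the test's own example values):

    // OZONE_SCM_DATANODE_ADDRESS_KEY wins outright when set; otherwise the
    // host comes from OZONE_SCM_CLIENT_ADDRESS_KEY and the port falls back
    // to OZONE_SCM_DATANODE_PORT_DEFAULT, ignoring any client port.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
    InetSocketAddress addr = OzoneClientUtils.getScmAddressForDataNodes(conf);
    // addr resolves to 1.2.3.4 with the default datanode port

    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8:200");
    addr = OzoneClientUtils.getScmAddressForDataNodes(conf);
    // addr resolves to 5.6.7.8:200, overriding the client address entirely
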
View File

@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client;
/**
* This package contains test classes for Ozone Client.
*/

View File

@ -16,11 +16,21 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone;
package org.apache.hadoop.ozone.client.rpc;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ozone.io.OzoneInputStream;
import org.apache.hadoop.ozone.io.OzoneOutputStream;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.junit.AfterClass;
import org.junit.Assert;
@ -37,13 +47,13 @@ import java.util.UUID;
/**
* This class tests all the public-facing APIs of the Ozone client.
*/
public class TestOzoneClientImpl {
public class TestOzoneRpcClient {
@Rule
public ExpectedException thrown = ExpectedException.none();
private static MiniOzoneCluster cluster = null;
private static OzoneClientImpl ozClient = null;
private static OzoneClient ozClient = null;
/**
* Create a MiniOzoneCluster for testing.
@ -60,7 +70,8 @@ public class TestOzoneClientImpl {
OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
cluster = new MiniOzoneCluster.Builder(conf)
.setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
ozClient = new OzoneClientImpl(conf);
OzoneClientFactory.setConfiguration(conf);
ozClient = OzoneClientFactory.getRpcClient();
}
@Test

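A sketch of the shape these tests now take against the interface-typed client; the accessor names getVolumeDetails() and getVolumeName() are assumptions for illustration, not quotations from the change:

    @Test
    public void testCreateVolume() throws IOException, OzoneException {
      String volumeName = UUID.randomUUID().toString();
      ozClient.createVolume(volumeName);
      OzoneVolume volume = ozClient.getVolumeDetails(volumeName); // assumed accessor
      Assert.assertEquals(volumeName, volume.getVolumeName());    // assumed accessor
    }
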
View File

@ -0,0 +1,23 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.client.rpc;
/**
* This package contains test classes for the Ozone RPC client library.
*/

View File

@ -28,7 +28,6 @@ import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.scm.XceiverClient;
import org.apache.hadoop.scm.XceiverClientSpi;
import org.apache.hadoop.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;

View File

@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.ksm;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
import org.apache.hadoop.ozone.ksm.exceptions

View File

@ -17,8 +17,8 @@
package org.apache.hadoop.ozone.ksm;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.ozone.web.storage.ChunkGroupInputStream;
import org.apache.hadoop.ozone.web.storage.ChunkGroupOutputStream;
import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.ozone.web;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.hadoop.util.Time;

View File

@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.web.headers.Header;
import org.apache.hadoop.ozone.client.rest.headers.Header;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpResponse;

View File

@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;
import org.apache.hadoop.ozone.OzoneClientUtils;
import org.apache.hadoop.ozone.client.OzoneClientUtils;
import org.apache.hadoop.ozone.web.exceptions.OzoneException;
import org.apache.hadoop.ozone.web.request.OzoneQuota;
import org.apache.hadoop.ozone.web.utils.OzoneUtils;