HADOOP-13122 Customize User-Agent header sent in HTTP requests by S3A. Chris Nauroth via stevel.

Steve Loughran 2016-05-12 13:56:48 +01:00
parent b7ac85259c
commit def2a6d385
5 changed files with 113 additions and 13 deletions

View File

@@ -945,6 +945,21 @@
upload. No effect if fs.s3a.fast.upload is false.</description>
</property>
<property>
<name>fs.s3a.user.agent.prefix</name>
<value></value>
<description>
Sets a custom value that will be prepended to the User-Agent header sent in
HTTP requests to the S3 back-end by S3AFileSystem. The User-Agent header
always includes the Hadoop version number followed by a string generated by
the AWS SDK. An example is "User-Agent: Hadoop 2.8.0, aws-sdk-java/1.10.6".
If this optional property is set, then its value is prepended to create a
customized User-Agent. For example, if this configuration property was set
to "MyApp", then an example of the resulting User-Agent would be
"User-Agent: MyApp, Hadoop 2.8.0, aws-sdk-java/1.10.6".
</description>
</property>
<property>
<name>fs.s3a.impl</name>
<value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
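As a usage illustration (not part of this commit), here is a minimal Java sketch of setting the new property programmatically before the filesystem is created. The bucket URI s3a://my-bucket/ and the prefix "MyApp" are placeholders, and working S3 credentials are assumed.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class UserAgentPrefixExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Prepend a custom marker to the User-Agent header. With this setting the
    // header sent to S3 starts with "MyApp, Hadoop <version>"; the AWS SDK then
    // appends its own suffix (SDK version, OS, JVM) as described above.
    conf.set("fs.s3a.user.agent.prefix", "MyApp");
    // Creating the filesystem applies the prefix to the underlying S3 client.
    FileSystem fs = FileSystem.get(new URI("s3a://my-bucket/"), conf);
    System.out.println("Created " + fs.getUri() + " with a customized User-Agent");
  }
}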

View File

@@ -122,4 +122,6 @@ public class Constants {
public static final String FS_S3A = "s3a";
public static final int S3A_DEFAULT_PORT = -1;
public static final String USER_AGENT_PREFIX = "fs.s3a.user.agent.prefix";
}

View File

@@ -69,6 +69,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.VersionInfo;
import static org.apache.hadoop.fs.s3a.Constants.*;
@@ -139,6 +140,8 @@ public class S3AFileSystem extends FileSystem {
initProxySupport(conf, awsConf, secureConnections);
initUserAgent(conf, awsConf);
initAmazonS3Client(conf, credentials, awsConf);
maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
@@ -231,6 +234,26 @@ public class S3AFileSystem extends FileSystem {
}
}
/**
* Initializes the User-Agent header to send in HTTP requests to the S3
* back-end. We always include the Hadoop version number. The user may also
* set an optional custom prefix to put in front of the Hadoop version number.
* The AWS SDK internally appends its own information, which seems to include
* the AWS SDK version, OS and JVM version.
*
* @param conf Hadoop configuration
* @param awsConf AWS SDK configuration
*/
private void initUserAgent(Configuration conf, ClientConfiguration awsConf) {
String userAgent = "Hadoop " + VersionInfo.getVersion();
String userAgentPrefix = conf.getTrimmed(USER_AGENT_PREFIX, "");
if (!userAgentPrefix.isEmpty()) {
userAgent = userAgentPrefix + ", " + userAgent;
}
LOG.debug("Using User-Agent: {}", userAgent);
awsConf.setUserAgent(userAgent);
}
private void initAmazonS3Client(Configuration conf,
AWSCredentialsProviderChain credentials, ClientConfiguration awsConf)
throws IllegalArgumentException {
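To illustrate the composition that initUserAgent() performs, a standalone sketch follows; the prefix value "MyApp" is an example only, and the suffix the AWS SDK appends varies by SDK version, OS, and JVM.

import com.amazonaws.ClientConfiguration;

import org.apache.hadoop.util.VersionInfo;

public class UserAgentComposition {
  public static void main(String[] args) {
    ClientConfiguration awsConf = new ClientConfiguration();
    String prefix = "MyApp";  // example value of fs.s3a.user.agent.prefix; empty by default
    String userAgent = "Hadoop " + VersionInfo.getVersion();
    if (!prefix.isEmpty()) {
      userAgent = prefix + ", " + userAgent;
    }
    awsConf.setUserAgent(userAgent);
    // Prints e.g. "MyApp, Hadoop 2.8.0"; with an empty prefix it would be just
    // "Hadoop 2.8.0". The SDK adds its own details when requests are sent.
    System.out.println(awsConf.getUserAgent());
  }
}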

View File

@@ -407,6 +407,21 @@ this capability.
</description>
</property>
<property>
<name>fs.s3a.user.agent.prefix</name>
<value></value>
<description>
Sets a custom value that will be prepended to the User-Agent header sent in
HTTP requests to the S3 back-end by S3AFileSystem. The User-Agent header
always includes the Hadoop version number followed by a string generated by
the AWS SDK. An example is "User-Agent: Hadoop 2.8.0, aws-sdk-java/1.10.6".
If this optional property is set, then its value is prepended to create a
customized User-Agent. For example, if this configuration property was set
to "MyApp", then an example of the resulting User-Agent would be
"User-Agent: MyApp, Hadoop 2.8.0, aws-sdk-java/1.10.6".
</description>
</property>
<property>
<name>fs.s3a.impl</name>
<value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>

View File

@@ -18,11 +18,13 @@
package org.apache.hadoop.fs.s3a;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.reflect.FieldUtils;
import com.amazonaws.AmazonClientException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -46,6 +48,7 @@ import java.lang.reflect.Field;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.util.VersionInfo;
import org.apache.http.HttpStatus;
import org.junit.rules.TemporaryFolder;
@@ -370,10 +373,13 @@ public class TestS3AConfiguration {
try {
fs = S3ATestUtils.createTestFileSystem(conf);
final Object object = getClientOptionsField(fs.getAmazonS3Client(), "clientOptions");
assertNotNull(object);
assertTrue("Unexpected type found for clientOptions!", object instanceof S3ClientOptions);
assertTrue("Expected to find path style access to be switched on!", ((S3ClientOptions) object).isPathStyleAccess());
assertNotNull(fs);
AmazonS3Client s3 = fs.getAmazonS3Client();
assertNotNull(s3);
S3ClientOptions clientOptions = getField(s3, S3ClientOptions.class,
"clientOptions");
assertTrue("Expected to find path style access to be switched on!",
clientOptions.isPathStyleAccess());
byte[] file = ContractTestUtils.toAsciiByteArray("test file");
ContractTestUtils.writeAndRead(fs, new Path("/path/style/access/testFile"), file, file.length, conf.getInt(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
} catch (final AmazonS3Exception e) {
@@ -385,14 +391,53 @@
}
}
private Object getClientOptionsField(AmazonS3Client s3client, String field)
throws NoSuchFieldException, IllegalAccessException {
final Field clientOptionsProps = s3client.getClass().getDeclaredField(field);
assertNotNull(clientOptionsProps);
if (!clientOptionsProps.isAccessible()) {
clientOptionsProps.setAccessible(true);
}
final Object object = clientOptionsProps.get(s3client);
return object;
}
@Test
public void testDefaultUserAgent() throws Exception {
conf = new Configuration();
fs = S3ATestUtils.createTestFileSystem(conf);
assertNotNull(fs);
AmazonS3Client s3 = fs.getAmazonS3Client();
assertNotNull(s3);
ClientConfiguration awsConf = getField(s3, ClientConfiguration.class,
"clientConfiguration");
assertEquals("Hadoop " + VersionInfo.getVersion(), awsConf.getUserAgent());
}
@Test
public void testCustomUserAgent() throws Exception {
conf = new Configuration();
conf.set(Constants.USER_AGENT_PREFIX, "MyApp");
fs = S3ATestUtils.createTestFileSystem(conf);
assertNotNull(fs);
AmazonS3Client s3 = fs.getAmazonS3Client();
assertNotNull(s3);
ClientConfiguration awsConf = getField(s3, ClientConfiguration.class,
"clientConfiguration");
assertEquals("MyApp, Hadoop " + VersionInfo.getVersion(),
awsConf.getUserAgent());
}
/**
* Reads and returns a field from an object using reflection. If the field
* cannot be found, is null, or is not the expected type, then this method
* fails the test.
*
* @param target object to read
* @param fieldType type of field to read, which will also be the return type
* @param fieldName name of field to read
* @return field that was read
* @throws IllegalAccessException if access not allowed
*/
private static <T> T getField(Object target, Class<T> fieldType,
String fieldName) throws IllegalAccessException {
Object obj = FieldUtils.readField(target, fieldName, true);
assertNotNull(String.format(
"Could not read field named %s in object with class %s.", fieldName,
target.getClass().getName()), obj);
assertTrue(String.format(
"Unexpected type found for field named %s, expected %s, actual %s.",
fieldName, fieldType.getName(), obj.getClass().getName()),
fieldType.isAssignableFrom(obj.getClass()));
return fieldType.cast(obj);
}
}