HDFS-10662. Optimize UTF8 string/byte conversions. Contributed by Daryn Sharp.
commit 6ae39199da
parent 70c2781152

DFSUtilClient.java
@@ -91,11 +91,21 @@ public class DFSUtilClient {
   public static final byte[] EMPTY_BYTES = {};
   private static final Logger LOG = LoggerFactory.getLogger(
       DFSUtilClient.class);
 
+  // Using the charset canonical name for String/byte[] conversions is much
+  // more efficient due to use of cached encoders/decoders.
+  private static final String UTF8_CSN = StandardCharsets.UTF_8.name();
+
   /**
    * Converts a string to a byte array using UTF8 encoding.
    */
   public static byte[] string2Bytes(String str) {
-    return str.getBytes(StandardCharsets.UTF_8);
+    try {
+      return str.getBytes(UTF8_CSN);
+    } catch (UnsupportedEncodingException e) {
+      // should never happen!
+      throw new IllegalArgumentException("UTF8 decoding is not supported", e);
+    }
   }
 
   /**
@@ -281,13 +291,13 @@ public class DFSUtilClient {
    * @param length The number of bytes to decode
    * @return The decoded string
    */
-  private static String bytes2String(byte[] bytes, int offset, int length) {
+  static String bytes2String(byte[] bytes, int offset, int length) {
     try {
-      return new String(bytes, offset, length, "UTF8");
-    } catch(UnsupportedEncodingException e) {
-      assert false : "UTF8 encoding is not supported ";
+      return new String(bytes, offset, length, UTF8_CSN);
+    } catch (UnsupportedEncodingException e) {
+      // should never happen!
+      throw new IllegalArgumentException("UTF8 encoding is not supported", e);
     }
-    return null;
   }
 
   /**
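
Note on the rationale (not part of the commit itself): on the JDK 7/8 era runtimes this change targeted, String.getBytes(String charsetName) and new String(byte[], int, int, String charsetName) go through java.lang.StringCoding, which caches the encoder/decoder it last used per thread, while the Charset-object overloads build a fresh CharsetEncoder/CharsetDecoder on every call. Passing the canonical name StandardCharsets.UTF_8.name() therefore avoids per-call allocation on these hot paths. A rough, self-contained sketch of the comparison follows; the class name and timing loop are illustrative only, and a real measurement would use a proper harness such as JMH.

    import java.io.UnsupportedEncodingException;
    import java.nio.charset.StandardCharsets;

    public class Utf8ConversionBench {
      private static final String UTF8_CSN = StandardCharsets.UTF_8.name();

      public static void main(String[] args) throws UnsupportedEncodingException {
        String sample = "/user/example/some/reasonably/long/path/component";
        int iterations = 2_000_000;
        long sink = 0;   // consume results so the loops are not optimized away

        long t0 = System.nanoTime();
        for (int i = 0; i < iterations; i++) {
          sink += sample.getBytes(StandardCharsets.UTF_8).length; // new encoder per call
        }
        long charsetObjectNanos = System.nanoTime() - t0;

        long t1 = System.nanoTime();
        for (int i = 0; i < iterations; i++) {
          sink += sample.getBytes(UTF8_CSN).length;               // cached encoder path
        }
        long charsetNameNanos = System.nanoTime() - t1;

        System.out.printf("charset object: %d ms, charset name: %d ms (sink=%d)%n",
            charsetObjectNanos / 1_000_000, charsetNameNanos / 1_000_000, sink);
      }
    }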

DFSUtil.java
@@ -38,7 +38,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_P
 
 import java.io.IOException;
 import java.io.PrintStream;
-import java.io.UnsupportedEncodingException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -248,12 +247,7 @@ public class DFSUtil {
    * @return The decoded string
    */
   public static String bytes2String(byte[] bytes, int offset, int length) {
-    try {
-      return new String(bytes, offset, length, "UTF8");
-    } catch(UnsupportedEncodingException e) {
-      assert false : "UTF8 encoding is not supported ";
-    }
-    return null;
+    return DFSUtilClient.bytes2String(bytes, 0, bytes.length);
   }
 
   /**
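
DFSUtil.bytes2String now delegates to the DFSUtilClient implementation rather than keeping its own copy of the try/catch. A minimal round-trip check along these lines (the class below is hypothetical; the single-argument DFSUtil.string2Bytes(String) and DFSUtil.bytes2String(byte[]) helpers are the ones the call sites further down rely on) shows the helpers stay interchangeable with the StandardCharsets-based conversions they replace:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    import org.apache.hadoop.hdfs.DFSUtil;

    public class Utf8RoundTripCheck {
      public static void main(String[] args) {
        String name = "dir-\u00e9\u4e2d\u6587";   // mixes ASCII and multi-byte characters

        byte[] viaHelper = DFSUtil.string2Bytes(name);
        byte[] viaCharset = name.getBytes(StandardCharsets.UTF_8);

        // Same bytes out, and the same string back on the decoder path.
        System.out.println(Arrays.equals(viaHelper, viaCharset));          // true
        System.out.println(name.equals(DFSUtil.bytes2String(viaHelper)));  // true
      }
    }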

FSDirMkdirOp.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import java.io.IOException;
-import java.nio.charset.StandardCharsets;
 import java.util.AbstractMap;
 import java.util.List;
 import java.util.Map;
@@ -121,8 +120,7 @@ class FSDirMkdirOp {
   static Map.Entry<INodesInPath, String> createAncestorDirectories(
       FSDirectory fsd, INodesInPath iip, PermissionStatus permission)
       throws IOException {
-    final String last =
-        new String(iip.getLastLocalName(), StandardCharsets.UTF_8);
+    final String last = DFSUtil.bytes2String(iip.getLastLocalName());
     INodesInPath existing = iip.getExistingINodes();
     List<String> children = iip.getPath(existing.length(),
         iip.length() - existing.length());
@@ -190,7 +188,7 @@ class FSDirMkdirOp {
       throws IOException {
     assert fsd.hasWriteLock();
     existing = unprotectedMkdir(fsd, fsd.allocateNewInodeId(), existing,
-        localName.getBytes(StandardCharsets.UTF_8), perm, null, now());
+        DFSUtil.string2Bytes(localName), perm, null, now());
     if (existing == null) {
       return null;
     }

FSDirStatAndListingOp.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 
 import static org.apache.hadoop.util.Time.now;
@@ -55,8 +54,7 @@ class FSDirStatAndListingOp {
       byte[] startAfter, boolean needLocation) throws IOException {
     byte[][] pathComponents = FSDirectory
         .getPathComponentsForReservedPath(srcArg);
-    final String startAfterString =
-        new String(startAfter, StandardCharsets.UTF_8);
+    final String startAfterString = DFSUtil.bytes2String(startAfter);
     String src = null;
 
     if (fsd.isPermissionEnabled()) {

FSDirWriteFileOp.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -59,7 +60,6 @@ import org.apache.hadoop.util.ChunkedArrayList;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -610,7 +610,7 @@ class FSDirWriteFileOp {
       }
       INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions,
           modTime, modTime, replication, preferredBlockSize, ecPolicy != null);
-      newNode.setLocalName(localName.getBytes(StandardCharsets.UTF_8));
+      newNode.setLocalName(DFSUtil.string2Bytes(localName));
       newNode.toUnderConstruction(clientName, clientMachine);
       newiip = fsd.addINode(existing, newNode);
     } finally {

FSDirXAttrOp.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -26,6 +25,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
@@ -410,7 +410,7 @@ class FSDirXAttrOp {
    * the configured limit. Setting a limit of zero disables this check.
    */
   private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) {
-    int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
+    int size = DFSUtil.string2Bytes(xAttr.getName()).length;
     if (xAttr.getValue() != null) {
       size += xAttr.getValue().length;
     }
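
The checkXAttrSize change keeps the existing semantics: the limit is enforced against the UTF-8 encoded length of the xattr name, which can exceed String.length() once non-ASCII characters are involved, while moving onto the cached-encoder path. A tiny illustration of that distinction (hypothetical names, not taken from the patch):

    import java.nio.charset.StandardCharsets;

    public class XAttrNameLength {
      public static void main(String[] args) {
        String asciiName = "user.owner";
        String accentedName = "user.propri\u00e9taire";   // the 'é' encodes to two bytes

        System.out.println(asciiName.length());                                   // 10
        System.out.println(asciiName.getBytes(StandardCharsets.UTF_8).length);    // 10
        System.out.println(accentedName.length());                                // 17
        System.out.println(accentedName.getBytes(StandardCharsets.UTF_8).length); // 18
      }
    }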