svn merge -c 1609401 from trunk for HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and INodeFile.HeaderFormat.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1609402 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2014-07-10 06:17:55 +00:00
parent f1e29d5800
commit 52a07f1a39
7 changed files with 119 additions and 64 deletions
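Both formats touched by this patch pack fixed-width fields into a single 64-bit long; the patch only moves the shift/mask arithmetic into the new LongBitFormat helper class, while the bit layouts themselves are unchanged. As a minimal standalone sketch (not code from this commit), the INodeFile header layout, [16 bits for replication][48 bits for PreferredBlockSize], can be packed and unpacked like this:

// Standalone illustration of the 64-bit INodeFile header layout; not from this commit.
public class HeaderLayoutDemo {
  static final int BLOCK_BITS = 48;                      // low 48 bits: preferred block size
  static final long BLOCK_MASK = (1L << BLOCK_BITS) - 1; // mask for the block-size field

  public static void main(String[] args) {
    short replication = 3;
    long preferredBlockSize = 128L * 1024 * 1024;        // 128 MB

    long header = ((long) replication << BLOCK_BITS)     // high 16 bits: replication
        | (preferredBlockSize & BLOCK_MASK);             // low 48 bits: block size

    System.out.println(header >>> BLOCK_BITS);           // 3
    System.out.println(header & BLOCK_MASK);             // 134217728
  }
}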

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -21,6 +21,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6645. Add test for successive Snapshots between XAttr modifications.
     (Stephen Chu via jing9)
 
+    HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and
+    INodeFile.HeaderFormat. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java

@@ -97,14 +97,12 @@ public interface INodeAttributes {
     @Override
     public final String getUserName() {
-      final int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-      return SerialNumberManager.INSTANCE.getUser(n);
+      return PermissionStatusFormat.getUser(permission);
     }
 
     @Override
     public final String getGroupName() {
-      final int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-      return SerialNumberManager.INSTANCE.getGroup(n);
+      return PermissionStatusFormat.getGroup(permission);
     }
 
     @Override
@@ -114,7 +112,7 @@ public interface INodeAttributes {
     @Override
     public final short getFsPermissionShort() {
-      return (short)PermissionStatusFormat.MODE.retrieve(permission);
+      return PermissionStatusFormat.getMode(permission);
     }
 
     @Override

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -71,37 +72,29 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
-  static class HeaderFormat {
-    /** Number of bits for Block size */
-    static final int BLOCKBITS = 48;
-    /** Header mask 64-bit representation */
-    static final long HEADERMASK = 0xffffL << BLOCKBITS;
-    static final long MAX_BLOCK_SIZE = ~HEADERMASK;
+  static enum HeaderFormat {
+    PREFERRED_BLOCK_SIZE(null, 48, 1),
+    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 16, 1);
+
+    private final LongBitFormat BITS;
+
+    private HeaderFormat(LongBitFormat previous, int length, long min) {
+      BITS = new LongBitFormat(name(), previous, length, min);
+    }
 
     static short getReplication(long header) {
-      return (short) ((header & HEADERMASK) >> BLOCKBITS);
-    }
-
-    static long combineReplication(long header, short replication) {
-      if (replication <= 0) {
-        throw new IllegalArgumentException(
-            "Unexpected value for the replication: " + replication);
-      }
-      return ((long)replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
+      return (short)REPLICATION.BITS.retrieve(header);
     }
 
     static long getPreferredBlockSize(long header) {
-      return header & MAX_BLOCK_SIZE;
+      return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
     }
 
-    static long combinePreferredBlockSize(long header, long blockSize) {
-      if (blockSize < 0) {
-        throw new IllegalArgumentException("Block size < 0: " + blockSize);
-      } else if (blockSize > MAX_BLOCK_SIZE) {
-        throw new IllegalArgumentException("Block size = " + blockSize
-            + " > MAX_BLOCK_SIZE = " + MAX_BLOCK_SIZE);
-      }
-      return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
+    static long toLong(long preferredBlockSize, short replication) {
+      long h = 0;
+      h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
+      h = REPLICATION.BITS.combine(replication, h);
+      return h;
     }
   }
@@ -113,8 +106,7 @@ public class INodeFile extends INodeWithAdditionalFields
       long atime, BlockInfo[] blklist, short replication,
       long preferredBlockSize) {
     super(id, name, permissions, mtime, atime);
-    header = HeaderFormat.combineReplication(header, replication);
-    header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
+    header = HeaderFormat.toLong(preferredBlockSize, replication);
     this.blocks = blklist;
   }
@@ -347,7 +339,7 @@ public class INodeFile extends INodeWithAdditionalFields
   /** Set the replication factor of this file. */
   public final void setFileReplication(short replication) {
-    header = HeaderFormat.combineReplication(header, replication);
+    header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
   }
 
   /** Set the replication factor of this file. */

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java

@@ -48,9 +48,7 @@ public interface INodeFileAttributes extends INodeAttributes {
         short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
       super(name, permissions, aclFeature, modificationTime, accessTime,
           xAttrsFeature);
-
-      final long h = HeaderFormat.combineReplication(0L, replication);
-      header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);
+      header = HeaderFormat.toLong(preferredBlockSize, replication);
     }
 
     public SnapshotCopy(INodeFile file) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java

@@ -21,9 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.base.Preconditions;
@@ -36,26 +35,28 @@ import com.google.common.base.Preconditions;
 public abstract class INodeWithAdditionalFields extends INode
     implements LinkedElement {
   static enum PermissionStatusFormat {
-    MODE(0, 16),
-    GROUP(MODE.OFFSET + MODE.LENGTH, 25),
-    USER(GROUP.OFFSET + GROUP.LENGTH, 23);
+    MODE(null, 16),
+    GROUP(MODE.BITS, 25),
+    USER(GROUP.BITS, 23);
 
-    final int OFFSET;
-    final int LENGTH; //bit length
-    final long MASK;
+    final LongBitFormat BITS;
 
-    PermissionStatusFormat(int offset, int length) {
-      OFFSET = offset;
-      LENGTH = length;
-      MASK = ((-1L) >>> (64 - LENGTH)) << OFFSET;
+    private PermissionStatusFormat(LongBitFormat previous, int length) {
+      BITS = new LongBitFormat(name(), previous, length, 0);
     }
 
-    long retrieve(long record) {
-      return (record & MASK) >>> OFFSET;
+    static String getUser(long permission) {
+      final int n = (int)USER.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getUser(n);
     }
 
-    long combine(long bits, long record) {
-      return (record & ~MASK) | (bits << OFFSET);
+    static String getGroup(long permission) {
+      final int n = (int)GROUP.BITS.retrieve(permission);
+      return SerialNumberManager.INSTANCE.getGroup(n);
+    }
+
+    static short getMode(long permission) {
+      return (short)MODE.BITS.retrieve(permission);
     }
 
     /** Encode the {@link PermissionStatus} to a long. */
@@ -63,12 +64,12 @@ public abstract class INodeWithAdditionalFields extends INode
       long permission = 0L;
       final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
           ps.getUserName());
-      permission = USER.combine(user, permission);
+      permission = USER.BITS.combine(user, permission);
       final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
           ps.getGroupName());
-      permission = GROUP.combine(group, permission);
+      permission = GROUP.BITS.combine(group, permission);
       final int mode = ps.getPermission().toShort();
-      permission = MODE.combine(mode, permission);
+      permission = MODE.BITS.combine(mode, permission);
       return permission;
     }
   }
@@ -162,7 +163,7 @@ public abstract class INodeWithAdditionalFields extends INode
   }
 
   private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
-    this.permission = f.combine(n, permission);
+    this.permission = f.BITS.combine(n, permission);
   }
 
   @Override
@@ -170,9 +171,7 @@ public abstract class INodeWithAdditionalFields extends INode
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getUserName();
     }
-
-    int n = (int)PermissionStatusFormat.USER.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getUser(n);
+    return PermissionStatusFormat.getUser(permission);
   }
 
   @Override
@@ -186,9 +185,7 @@ public abstract class INodeWithAdditionalFields extends INode
     if (snapshotId != Snapshot.CURRENT_STATE_ID) {
       return getSnapshotINode(snapshotId).getGroupName();
     }
-
-    int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
-    return SerialNumberManager.INSTANCE.getGroup(n);
+    return PermissionStatusFormat.getGroup(permission);
  }
 
   @Override
@@ -208,7 +205,7 @@ public abstract class INodeWithAdditionalFields extends INode
   @Override
   public final short getFsPermissionShort() {
-    return (short)PermissionStatusFormat.MODE.retrieve(permission);
+    return PermissionStatusFormat.getMode(permission);
   }
 
   @Override
   void setPermission(FsPermission permission) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java

@@ -0,0 +1,67 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.io.Serializable;
/**
* Bit format in a long.
*/
public class LongBitFormat implements Serializable {
  private static final long serialVersionUID = 1L;

  private final String NAME;
  /** Bit offset */
  private final int OFFSET;
  /** Bit length */
  private final int LENGTH;
  /** Minimum value */
  private final long MIN;
  /** Maximum value */
  private final long MAX;
  /** Bit mask */
  private final long MASK;

  public LongBitFormat(String name, LongBitFormat previous, int length, long min) {
    NAME = name;
    OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH;
    LENGTH = length;
    MIN = min;
    MAX = ((-1L) >>> (64 - LENGTH));
    MASK = MAX << OFFSET;
  }

  /** Retrieve the value from the record. */
  public long retrieve(long record) {
    return (record & MASK) >>> OFFSET;
  }

  /** Combine the value to the record. */
  public long combine(long value, long record) {
    if (value < MIN) {
      throw new IllegalArgumentException(
          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
    }
    if (value > MAX) {
      throw new IllegalArgumentException(
          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
    }
    return (record & ~MASK) | (value << OFFSET);
  }
}
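
For context, a hypothetical usage sketch of the new class (the demo class and variable names below are illustrative, not part of this patch): two chained LongBitFormat instances mirroring INodeFile.HeaderFormat, packing a preferred block size and a replication factor into one long and reading them back.

import org.apache.hadoop.hdfs.util.LongBitFormat;

// Illustrative only: mirrors how INodeFile.HeaderFormat chains two formats.
public class LongBitFormatDemo {
  public static void main(String[] args) {
    // Low 48 bits: preferred block size; next 16 bits: replication (minimum value 1 for both).
    LongBitFormat blockSize = new LongBitFormat("preferredBlockSize", null, 48, 1);
    LongBitFormat replication = new LongBitFormat("replication", blockSize, 16, 1);

    long header = 0;
    header = blockSize.combine(128L * 1024 * 1024, header);  // 128 MB
    header = replication.combine(3, header);                 // replication factor 3

    System.out.println(blockSize.retrieve(header));    // 134217728
    System.out.println(replication.retrieve(header));  // 3
  }
}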

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

@@ -80,7 +80,7 @@ public class TestINodeFile {
   private final PermissionStatus perm = new PermissionStatus(
       "userName", null, FsPermission.getDefault());
   private short replication;
-  private long preferredBlockSize;
+  private long preferredBlockSize = 1024;
 
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,