HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

Author: Haohui Mai
Date:   2015-04-16 23:13:15 -07:00
Commit: 5c97db07fb
Parent: 8511d80804
43 changed files with 299 additions and 235 deletions
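The pattern across these files: helpers and constants that client-side code needs move out of server-only classes (DFSUtil, BlockStoragePolicySuite, WebHdfsFileSystem) into classes that live in the hdfs-client module (DFSUtilClient, HdfsConstantsClient, WebHdfsConstants), and every call site is repointed. A minimal sketch of the new call pattern, for illustration only; the class StoragePolicyReport below is hypothetical, while the DFSUtilClient methods and the HdfsConstantsClient constant are the ones introduced in this commit:

import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;

class StoragePolicyReport {
  // Formerly DFSUtil.getPercentUsed(...) and DFSUtil.percent2String(...).
  static String usedPercent(long used, long capacity) {
    return DFSUtilClient.percent2String(
        DFSUtilClient.getPercentUsed(used, capacity));
  }

  // Formerly compared against BlockStoragePolicySuite.ID_UNSPECIFIED.
  static boolean hasExplicitPolicy(byte policyId) {
    return policyId != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
  }
}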


@@ -1,2 +1,10 @@
 <FindBugsFilter>
+  <Match>
+    <Or>
+      <Class name="org.apache.hadoop.hdfs.protocol.HdfsFileStatus"/>
+      <Class name="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
+      <Class name="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
+    </Or>
+    <Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
+  </Match>
 </FindBugsFilter>


@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.util.StringUtils;
+
+import java.io.UnsupportedEncodingException;
+
+public class DFSUtilClient {
+  /**
+   * Converts a byte array to a string using UTF8 encoding.
+   */
+  public static String bytes2String(byte[] bytes) {
+    return bytes2String(bytes, 0, bytes.length);
+  }
+
+  /** Return used as percentage of capacity */
+  public static float getPercentUsed(long used, long capacity) {
+    return capacity <= 0 ? 100 : (used * 100.0f)/capacity;
+  }
+
+  /** Return remaining as percentage of capacity */
+  public static float getPercentRemaining(long remaining, long capacity) {
+    return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
+  }
+
+  /** Convert percentage to a string. */
+  public static String percent2String(double percentage) {
+    return StringUtils.format("%.2f%%", percentage);
+  }
+
+  /**
+   * Decode a specific range of bytes of the given byte array to a string
+   * using UTF8.
+   *
+   * @param bytes The bytes to be decoded into characters
+   * @param offset The index of the first byte to decode
+   * @param length The number of bytes to decode
+   * @return The decoded string
+   */
+  private static String bytes2String(byte[] bytes, int offset, int length) {
+    try {
+      return new String(bytes, offset, length, "UTF8");
+    } catch(UnsupportedEncodingException e) {
+      assert false : "UTF8 encoding is not supported ";
+    }
+    return null;
+  }
+}


@@ -19,8 +19,7 @@ package org.apache.hadoop.hdfs.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -32,7 +31,7 @@ import java.util.Date;
 import java.util.LinkedList;
 import java.util.List;
-import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
+import static org.apache.hadoop.hdfs.DFSUtilClient.percent2String;
 /**
  * This class extends the primary identifier of a Datanode with ephemeral
@@ -176,7 +175,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   /** The used space by the data node as percentage of present capacity */
   public float getDfsUsedPercent() {
-    return DFSUtil.getPercentUsed(dfsUsed, capacity);
+    return DFSUtilClient.getPercentUsed(dfsUsed, capacity);
   }
   /** The raw free space. */
@@ -184,12 +183,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
   /** Used space by the block pool as percentage of present capacity */
   public float getBlockPoolUsedPercent() {
-    return DFSUtil.getPercentUsed(blockPoolUsed, capacity);
+    return DFSUtilClient.getPercentUsed(blockPoolUsed, capacity);
   }
   /** The remaining space as percentage of configured capacity. */
   public float getRemainingPercent() {
-    return DFSUtil.getPercentRemaining(remaining, capacity);
+    return DFSUtilClient.getPercentRemaining(remaining, capacity);
   }
   /**
@@ -210,7 +209,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   * @return Cache used as a percentage of the datanode's total cache capacity
   */
   public float getCacheUsedPercent() {
-    return DFSUtil.getPercentUsed(cacheUsed, cacheCapacity);
+    return DFSUtilClient.getPercentUsed(cacheUsed, cacheCapacity);
   }
   /**
@@ -225,7 +224,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   * capacity
   */
   public float getCacheRemainingPercent() {
-    return DFSUtil.getPercentRemaining(getCacheRemaining(), cacheCapacity);
+    return DFSUtilClient.getPercentRemaining(getCacheRemaining(), cacheCapacity);
   }
   /**
@@ -448,8 +447,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   /**
    * Check if the datanode is in stale state. Here if
    * the namenode has not received heartbeat msg from a
-   * datanode for more than staleInterval (default value is
-   * {@link DFSConfigKeys#DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT}),
+   * datanode for more than staleInterval,
    * the datanode will be treated as stale node.
    *
    * @param staleInterval


@@ -31,4 +31,5 @@ public interface HdfsConstantsClient {
    * uses GRANDFATHER_INODE_ID for backward compatibility.
    */
   long GRANDFATHER_INODE_ID = 0;
+  byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
 }


@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 /** Interface that represents the over the wire information for a file.
  */
@@ -188,7 +188,7 @@ public class HdfsFileStatus {
    * @return the local name in string
    */
   public final String getLocalName() {
-    return DFSUtil.bytes2String(path);
+    return DFSUtilClient.bytes2String(path);
   }
   /**
@@ -235,7 +235,7 @@ public class HdfsFileStatus {
    * @return the symlink as a string.
    */
   public final String getSymlink() {
-    return DFSUtil.bytes2String(symlink);
+    return DFSUtilClient.bytes2String(symlink);
   }
   public final byte[] getSymlinkInBytes() {


@@ -24,8 +24,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.security.token.Token;
 import com.google.common.collect.Lists;
@@ -71,14 +69,6 @@ public class LocatedBlock {
     this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
   }
-  public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages,
-                      long startOffset, boolean corrupt) {
-    this(b, DatanodeStorageInfo.toDatanodeInfos(storages),
-        DatanodeStorageInfo.toStorageIDs(storages),
-        DatanodeStorageInfo.toStorageTypes(storages),
-        startOffset, corrupt, EMPTY_LOCS); // startOffset is unknown
-  }
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs,
                       StorageType[] storageTypes, long startOffset,
                       boolean corrupt, DatanodeInfo[] cachedLocs) {
@@ -214,4 +204,3 @@ public class LocatedBlock {
         + "}";
   }
 }


@@ -23,8 +23,7 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -85,7 +84,7 @@ public class DelegationTokenIdentifier
     }
     @Override
     public Text getKind() {
-      return WebHdfsFileSystem.TOKEN_KIND;
+      return WebHdfsConstants.WEBHDFS_TOKEN_KIND;
     }
   }
@@ -95,7 +94,7 @@ public class DelegationTokenIdentifier
     }
     @Override
     public Text getKind() {
-      return SWebHdfsFileSystem.TOKEN_KIND;
+      return WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
     }
   }
 }


@@ -17,9 +17,16 @@
  */
 package org.apache.hadoop.hdfs.web;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.io.Text;
-class WebHdfsConstants {
+@InterfaceAudience.Private
+public class WebHdfsConstants {
+  /** Delegation token kind */
+  public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation");
+  public static final Text SWEBHDFS_TOKEN_KIND = new Text("SWEBHDFS delegation");
   enum PathType {
     FILE, DIRECTORY, SYMLINK;


@@ -450,6 +450,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)
+    HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9)
   OPTIMIZATIONS
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than


@@ -1235,21 +1235,6 @@ public class DFSUtil {
     }
   }
-  /** Return used as percentage of capacity */
-  public static float getPercentUsed(long used, long capacity) {
-    return capacity <= 0 ? 100 : (used * 100.0f)/capacity;
-  }
-  /** Return remaining as percentage of capacity */
-  public static float getPercentRemaining(long remaining, long capacity) {
-    return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity;
-  }
-  /** Convert percentage to a string. */
-  public static String percent2String(double percentage) {
-    return StringUtils.format("%.2f%%", percentage);
-  }
   /**
    * Round bytes to GiB (gibibyte)
    * @param bytes number of bytes


@@ -25,7 +25,6 @@ import java.util.Date;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 /**
  * Metadata about a snapshottable directory
@@ -62,7 +61,7 @@ public class SnapshottableDirectoryStatus {
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, BlockStoragePolicySuite.ID_UNSPECIFIED);
+        childrenNum, null, HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;


@@ -184,7 +184,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -1442,7 +1441,7 @@ public class PBHelper {
         fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
         fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
         fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
-            : BlockStoragePolicySuite.ID_UNSPECIFIED);
+            : HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
   }
   public static SnapshottableDirectoryStatus convert(


@@ -836,7 +836,7 @@ public class BlockManager {
         (BlockInfoContiguousUnderConstruction) blk;
     final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
     final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
-    return new LocatedBlock(eb, storages, pos, false);
+    return newLocatedBlock(eb, storages, pos, false);
   }
   // get block locations
@@ -868,7 +868,7 @@ public class BlockManager {
         " numCorrupt: " + numCorruptNodes +
         " numCorruptRepls: " + numCorruptReplicas;
     final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
-    return new LocatedBlock(eb, machines, pos, isCorrupt);
+    return newLocatedBlock(eb, machines, pos, isCorrupt);
   }
   /** Create a LocatedBlocks. */
@@ -3724,6 +3724,17 @@ public class BlockManager {
     postponedMisreplicatedBlocksCount.set(0);
   };
+  public static LocatedBlock newLocatedBlock(
+      ExtendedBlock b, DatanodeStorageInfo[] storages,
+      long startOffset, boolean corrupt) {
+    // startOffset is unknown
+    return new LocatedBlock(
+        b, DatanodeStorageInfo.toDatanodeInfos(storages),
+        DatanodeStorageInfo.toStorageIDs(storages),
+        DatanodeStorageInfo.toStorageTypes(storages),
+        startOffset, corrupt,
+        null);
+  }
   private static class ReplicationWork {


@@ -42,7 +42,6 @@ public class BlockStoragePolicySuite {
   public static final XAttr.NameSpace XAttrNS = XAttr.NameSpace.SYSTEM;
   public static final int ID_BIT_LENGTH = 4;
-  public static final byte ID_UNSPECIFIED = 0;
   @VisibleForTesting
   public static BlockStoragePolicySuite createDefaultSuite() {


@@ -25,6 +25,9 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;


@@ -22,7 +22,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -115,7 +115,7 @@ class HeartbeatManager implements DatanodeStatistics {
   @Override
   public synchronized float getCapacityUsedPercent() {
-    return DFSUtil.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
+    return DFSUtilClient.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
   }
   @Override
@@ -125,8 +125,8 @@ class HeartbeatManager implements DatanodeStatistics {
   @Override
   public synchronized float getCapacityRemainingPercent() {
-    return DFSUtil.getPercentRemaining(
-        stats.capacityRemaining, stats.capacityTotal);
+    return DFSUtilClient.getPercentRemaining(stats.capacityRemaining,
+        stats.capacityTotal);
   }
   @Override
@@ -136,7 +136,8 @@ class HeartbeatManager implements DatanodeStatistics {
   @Override
   public synchronized float getPercentBlockPoolUsed() {
-    return DFSUtil.getPercentUsed(stats.blockPoolUsed, stats.capacityTotal);
+    return DFSUtilClient.getPercentUsed(stats.blockPoolUsed,
+        stats.capacityTotal);
   }
   @Override


@@ -53,7 +53,6 @@ import org.apache.hadoop.util.ToolRunner;
 import java.io.BufferedReader;
 import java.io.FileInputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.URI;
@@ -331,7 +330,7 @@ public class Mover {
     private boolean processFile(String fullPath, HdfsLocatedFileStatus status) {
       final byte policyId = status.getStoragePolicy();
       // currently we ignore files with unspecified storage policy
-      if (policyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
+      if (policyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
        return false;
      }
      final BlockStoragePolicy policy = blockStoragePolicies[policyId];


@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -140,7 +140,7 @@ class FSDirStatAndListingOp {
   }
   private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
-    return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED ? inodePolicy :
+    return inodePolicy != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? inodePolicy :
         parentPolicy;
   }
@@ -176,8 +176,8 @@ class FSDirStatAndListingOp {
       if (targetNode == null)
         return null;
       byte parentStoragePolicy = isSuperUser ?
-          targetNode.getStoragePolicyID() : BlockStoragePolicySuite
-          .ID_UNSPECIFIED;
+          targetNode.getStoragePolicyID() : HdfsConstantsClient
+          .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       if (!targetNode.isDirectory()) {
         return new DirectoryListing(
@@ -199,7 +199,7 @@ class FSDirStatAndListingOp {
         INode cur = contents.get(startChild+i);
         byte curPolicy = isSuperUser && !cur.isSymlink()?
             cur.getLocalStoragePolicyID():
-            BlockStoragePolicySuite.ID_UNSPECIFIED;
+            HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
         listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
             needLocation, getStoragePolicyID(curPolicy,
                 parentStoragePolicy), snapshot, isRawPath, iip);
@@ -254,7 +254,7 @@ class FSDirStatAndListingOp {
       for (int i = 0; i < numOfListing; i++) {
         Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
         listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
-            BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
+            HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
             false, INodesInPath.fromINode(sRoot));
       }
       return new DirectoryListing(
@@ -277,7 +277,7 @@ class FSDirStatAndListingOp {
     try {
       final INode i = src.getLastINode();
       byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
-          i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
+          i.getStoragePolicyID() : HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       return i == null ? null : createFileStatus(
           fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
           src.getPathSnapshotId(), isRawPath, src);
@@ -295,7 +295,7 @@ class FSDirStatAndListingOp {
       if (fsd.getINode4DotSnapshot(srcs) != null) {
         return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
             HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-            BlockStoragePolicySuite.ID_UNSPECIFIED);
+            HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
       }
       return null;
     }
@@ -322,7 +322,7 @@ class FSDirStatAndListingOp {
       if (fsd.getINode4DotSnapshot(src) != null) {
         return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
             HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-            BlockStoragePolicySuite.ID_UNSPECIFIED);
+            HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
       }
       return null;
     }


@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
@@ -61,7 +62,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.security.AccessControlException;
@@ -742,7 +742,7 @@ public class FSDirectory implements Closeable {
     EnumCounters<StorageType> typeSpaceDeltas =
         new EnumCounters<StorageType>(StorageType.class);
     // Storage type and its quota are only available when storage policy is set
-    if (storagePolicyID != BlockStoragePolicySuite.ID_UNSPECIFIED) {
+    if (storagePolicyID != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
       BlockStoragePolicy storagePolicy = getBlockManager().getStoragePolicy(storagePolicyID);
       if (oldRep != newRep) {


@@ -36,7 +36,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -380,7 +379,7 @@ public class FSEditLogLoader {
       if (toAddRetryCache) {
         HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
             fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile,
-            BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
+            HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
             false, iip);
         fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
             addCloseOp.rpcCallId, stat);
@@ -400,7 +399,7 @@ public class FSEditLogLoader {
         HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
             fsNamesys.dir, path,
             HdfsFileStatus.EMPTY_NAME, newFile,
-            BlockStoragePolicySuite.ID_UNSPECIFIED,
+            HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
             Snapshot.CURRENT_STATE_ID, false, iip);
         fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
             addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat));
@@ -473,7 +472,7 @@ public class FSEditLogLoader {
       if (toAddRetryCache) {
         HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
             fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, file,
-            BlockStoragePolicySuite.ID_UNSPECIFIED,
+            HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
             Snapshot.CURRENT_STATE_ID, false, iip);
         fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId,
             appendOp.rpcCallId, new LastBlockWithStatus(lb, stat));


@@ -108,7 +108,6 @@ import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
 import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
@@ -434,7 +433,7 @@ public abstract class FSEditLogOp {
     private AddCloseOp(FSEditLogOpCodes opCode) {
       super(opCode);
-      storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
+      storagePolicyId = HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND);
     }
@@ -636,7 +635,7 @@ public abstract class FSEditLogOp {
           NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
         this.storagePolicyId = FSImageSerialization.readByte(in);
       } else {
-        this.storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
+        this.storagePolicyId = HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       }
       // read clientId and callId
       readRpcIds(in, logVersion);


@@ -3287,7 +3287,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
                                 long offset) throws IOException {
-    LocatedBlock lBlk = new LocatedBlock(
+    LocatedBlock lBlk = BlockManager.newLocatedBlock(
         getExtendedBlock(blk), locs, offset, false);
     getBlockManager().setBlockToken(
         lBlk, BlockTokenIdentifier.AccessMode.WRITE);
@@ -3351,7 +3351,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode(
         src, numAdditionalNodes, clientnode, chosen,
         excludes, preferredblocksize, storagePolicyID);
-    final LocatedBlock lb = new LocatedBlock(blk, targets, -1, false);
+    final LocatedBlock lb = BlockManager.newLocatedBlock(
+        blk, targets, -1, false);
     blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
     return lb;
   }


@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -510,7 +511,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    */
   public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
     final byte storagePolicyId = isSymlink() ?
-        BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
+        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
     return computeQuotaUsage(bsps, storagePolicyId,
         new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID);
   }
@@ -554,7 +555,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public final QuotaCounts computeQuotaUsage(
       BlockStoragePolicySuite bsps, QuotaCounts counts, boolean useCache) {
     final byte storagePolicyId = isSymlink() ?
-        BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
+        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
     return computeQuotaUsage(bsps, storagePolicyId, counts,
         useCache, Snapshot.CURRENT_STATE_ID);
   }
@@ -711,7 +712,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   /**
    * @return the storage policy directly specified on the INode. Return
-   * {@link BlockStoragePolicySuite#ID_UNSPECIFIED} if no policy has
+   * {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} if no policy has
    * been specified.
    */
   public abstract byte getLocalStoragePolicyID();
@@ -720,13 +721,13 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * Get the storage policy ID while computing quota usage
    * @param parentStoragePolicyId the storage policy ID of the parent directory
    * @return the storage policy ID of this INode. Note that for an
-   * {@link INodeSymlink} we return {@link BlockStoragePolicySuite#ID_UNSPECIFIED}
+   * {@link INodeSymlink} we return {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED}
    * instead of throwing Exception
    */
   public byte getStoragePolicyIDForQuota(byte parentStoragePolicyId) {
     byte localId = isSymlink() ?
-        BlockStoragePolicySuite.ID_UNSPECIFIED : getLocalStoragePolicyID();
-    return localId != BlockStoragePolicySuite.ID_UNSPECIFIED ?
+        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getLocalStoragePolicyID();
+    return localId != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ?
         localId : parentStoragePolicyId;
   }


@@ -45,7 +45,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
-import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 /**
  * Directory INode class.
@@ -125,18 +125,17 @@ public class INodeDirectory extends INodeWithAdditionalFields
         return (xattr.getValue())[0];
       }
     }
-    return ID_UNSPECIFIED;
+    return BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
   }
   @Override
   public byte getStoragePolicyID() {
     byte id = getLocalStoragePolicyID();
-    if (id != ID_UNSPECIFIED) {
+    if (id != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
       return id;
     }
     // if it is unspecified, check its parent
-    return getParent() != null ? getParent().getStoragePolicyID() :
-        ID_UNSPECIFIED;
+    return getParent() != null ? getParent().getStoragePolicyID() : BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
   }
   void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {


@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
@@ -393,7 +393,7 @@ public class INodeFile extends INodeWithAdditionalFields
   @Override
   public byte getStoragePolicyID() {
     byte id = getLocalStoragePolicyID();
-    if (id == ID_UNSPECIFIED) {
+    if (id == BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
       return this.getParent() != null ?
           this.getParent().getStoragePolicyID() : id;
     }
@@ -584,7 +584,7 @@ public class INodeFile extends INodeWithAdditionalFields
     counts.addNameSpace(nsDelta);
     counts.addStorageSpace(ssDeltaNoReplication * replication);
-    if (blockStoragePolicyId != ID_UNSPECIFIED){
+    if (blockStoragePolicyId != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
       BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
       List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
       for (StorageType t : storageTypes) {
@@ -619,7 +619,7 @@ public class INodeFile extends INodeWithAdditionalFields
     counts.addContent(Content.LENGTH, fileLen);
     counts.addContent(Content.DISKSPACE, storagespaceConsumed());
-    if (getStoragePolicyID() != ID_UNSPECIFIED){
+    if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
       BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
           getPolicy(getStoragePolicyID());
       List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());


@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
@@ -123,12 +124,12 @@ public class INodeMap {
       @Override
       public byte getStoragePolicyID(){
-        return BlockStoragePolicySuite.ID_UNSPECIFIED;
+        return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       }
       @Override
       public byte getLocalStoragePolicyID() {
-        return BlockStoragePolicySuite.ID_UNSPECIFIED;
+        return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       }
     };


@@ -21,6 +21,7 @@ import java.util.List;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
@@ -150,7 +151,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
     BlockStoragePolicy bsp = null;
     EnumCounters<StorageType> typeSpaces =
         new EnumCounters<StorageType>(StorageType.class);
-    if (storagePolicyID != BlockStoragePolicySuite.ID_UNSPECIFIED) {
+    if (storagePolicyID != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
       bsp = bsps.getPolicy(file.getStoragePolicyID());
     }


@@ -73,7 +73,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.ParamFilter;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.io.Text;
@@ -246,8 +246,8 @@ public class NamenodeWebHdfsMethods {
       return null;
     }
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
-        : SWebHdfsFileSystem.TOKEN_KIND;
+    Text kind = request.getScheme().equals("http") ? WebHdfsConstants.WEBHDFS_TOKEN_KIND
+        : WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
     t.setKind(kind);
     return t;
   }


@@ -22,8 +22,8 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -151,7 +151,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
         return 2;
       }
       byte storagePolicyId = status.getStoragePolicy();
-      if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
+      if (storagePolicyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
         System.out.println("The storage policy of " + path + " is unspecified");
         return 0;
       }


@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -128,7 +127,7 @@ class JsonUtilClient {
     final int childrenNum = getInt(m, "childrenNum", -1);
     final byte storagePolicy = m.containsKey("storagePolicy") ?
         (byte) ((Number) m.get("storagePolicy")).longValue() :
-        BlockStoragePolicySuite.ID_UNSPECIFIED;
+        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,


@@ -22,7 +22,6 @@ import org.apache.hadoop.io.Text;
 public class SWebHdfsFileSystem extends WebHdfsFileSystem {
-  public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
   public static final String SCHEME = "swebhdfs";
   @Override
@@ -37,7 +36,7 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem {
   @Override
   protected Text getTokenKind() {
-    return TOKEN_KIND;
+    return WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
   }
   @Override


@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.web;
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -57,8 +55,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
     @Override
     public boolean handleKind(Text kind) {
-      return kind.equals(WebHdfsFileSystem.TOKEN_KIND)
-          || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
+      return kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)
+          || kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND);
     }
     @Override
@@ -87,9 +85,9 @@ final class TokenAspect<T extends FileSystem & Renewable> {
   }
   private static String getSchemeByKind(Text kind) {
-    if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
+    if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) {
       return WebHdfsFileSystem.SCHEME;
-    } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
+    } else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) {
       return SWebHdfsFileSystem.SCHEME;
     } else {
       throw new IllegalArgumentException("Unsupported scheme");


@@ -101,9 +101,6 @@ public class WebHdfsFileSystem extends FileSystem
   /** Default connection factory may be overridden in tests to use smaller timeout values */
   protected URLConnectionFactory connectionFactory;
-  /** Delegation token kind */
-  public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
   @VisibleForTesting
   public static final String CANT_FALLBACK_TO_INSECURE_MSG =
       "The client is configured to only allow connecting to secure cluster";
@@ -139,7 +136,7 @@ public class WebHdfsFileSystem extends FileSystem
   }
   protected Text getTokenKind() {
-    return TOKEN_KIND;
+    return WebHdfsConstants.WEBHDFS_TOKEN_KIND;
   }
   @Override


@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -853,8 +853,10 @@ public class TestBlockStoragePolicy {
         HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
     HdfsFileStatus[] barList = fs.getClient().listPaths(barDir.toString(),
         HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
-    checkDirectoryListing(dirList, ID_UNSPECIFIED, ID_UNSPECIFIED);
-    checkDirectoryListing(barList, ID_UNSPECIFIED, ID_UNSPECIFIED);
+    checkDirectoryListing(dirList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
+        BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+    checkDirectoryListing(barList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
+        BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
     final Path invalidPath = new Path("/invalidPath");
     try {


@@ -34,7 +34,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -103,10 +103,11 @@ public class TestNamenodeCapacityReport {
           + " percentRemaining " + percentRemaining);
       assertTrue(configCapacity == (used + remaining + nonDFSUsed));
-      assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
-      assertTrue(percentRemaining == DFSUtil.getPercentRemaining(remaining,
+      assertTrue(percentUsed == DFSUtilClient.getPercentUsed(used,
          configCapacity));
-      assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed,
+      assertTrue(percentRemaining == DFSUtilClient.getPercentRemaining(
+          remaining, configCapacity));
+      assertTrue(percentBpUsed == DFSUtilClient.getPercentUsed(bpUsed,
          configCapacity));
     }
@@ -152,10 +153,12 @@ public class TestNamenodeCapacityReport {
     assertTrue(configCapacity == (used + remaining + nonDFSUsed));
     // Ensure percent used is calculated based on used and present capacity
-    assertTrue(percentUsed == DFSUtil.getPercentUsed(used, configCapacity));
+    assertTrue(percentUsed == DFSUtilClient.getPercentUsed(used,
+        configCapacity));
     // Ensure percent used is calculated based on used and present capacity
-    assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed, configCapacity));
+    assertTrue(percentBpUsed == DFSUtilClient.getPercentUsed(bpUsed,
+        configCapacity));
     // Ensure percent used is calculated based on used and present capacity
     assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);


@@ -328,7 +328,7 @@ public class TestWebHdfsUrl {
           dtId, dtSecretManager);
       SecurityUtil.setTokenService(
           token, NetUtils.createSocketAddr(uri.getAuthority()));
-      token.setKind(WebHdfsFileSystem.TOKEN_KIND);
+      token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
       ugi.addToken(token);
     }
     return (WebHdfsFileSystem) FileSystem.get(uri, conf);