HDFS-12258. ec -listPolicies should list all policies in system, no matter it's enabled or disabled. Contributed by Wei Zhou.

commit 200b11368d
parent 32cba6c303
File: DFSStripedOutputStream.java

@@ -17,6 +17,35 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.io.ByteBufferPool;
+import org.apache.hadoop.io.ElasticByteBufferPool;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
+import org.apache.htrace.core.TraceScope;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
@@ -32,45 +61,15 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.StreamCapabilities;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.apache.hadoop.io.ByteBufferPool;
-import org.apache.hadoop.io.ElasticByteBufferPool;
-import org.apache.hadoop.io.MultipleIOException;
-import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
-import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
-import org.apache.htrace.core.TraceScope;
-
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
 /**
@@ -777,7 +776,8 @@ public class DFSStripedOutputStream extends DFSOutputStream
     // should update the block group length based on the acked length
     final long sentBytes = currentBlockGroup.getNumBytes();
     final long ackedBytes = getNumAckedStripes() * cellSize * numDataBlocks;
-    Preconditions.checkState(ackedBytes <= sentBytes);
+    Preconditions.checkState(ackedBytes <= sentBytes,
+        "Acked:" + ackedBytes + ", Sent:" + sentBytes);
     currentBlockGroup.setNumBytes(ackedBytes);
     newBG.setNumBytes(ackedBytes);
     dfsClient.namenode.updatePipeline(dfsClient.clientName, currentBlockGroup,
File: DistributedFileSystem.java

@@ -18,21 +18,14 @@
 
 package org.apache.hadoop.hdfs;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
@@ -53,24 +46,24 @@ import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.StorageStatistics;
-import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -81,6 +74,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
@@ -95,17 +89,22 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
 import javax.annotation.Nonnull;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
 
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
@@ -2603,7 +2602,8 @@ public class DistributedFileSystem extends FileSystem {
 
   /**
    * Retrieve all the erasure coding policies supported by this file system,
-   * excluding REPLICATION policy.
+   * including enabled, disabled and removed policies, but excluding
+   * REPLICATION policy.
    *
    * @return all erasure coding policies supported by this file system.
    * @throws IOException
@@ -2628,8 +2628,9 @@ public class DistributedFileSystem extends FileSystem {
   /**
    * Add Erasure coding policies to HDFS. For each policy input, schema and
    * cellSize are musts, name and id are ignored. They will be automatically
-   * created and assigned by Namenode once the policy is successfully added, and
-   * will be returned in the response.
+   * created and assigned by Namenode once the policy is successfully added,
+   * and will be returned in the response; policy states will be set to
+   * DISABLED automatically.
    *
    * @param policies The user defined ec policy list to add.
    * @return Return the response list of adding operations.
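The reworded javadoc above describes a two-step lifecycle for user-defined policies: adding leaves the returned policy DISABLED, and it must then be enabled by name before files can use it. A minimal client-side sketch of that flow, assuming `fs` is an already-initialized DistributedFileSystem; the rs(6,3) schema and 1 MiB cell size are illustrative values, not taken from this commit:

    // Hypothetical schema values: any codec supported by the cluster works.
    ECSchema schema = new ECSchema("rs", 6, 3);
    ErasureCodingPolicy policy = new ErasureCodingPolicy(schema, 1024 * 1024);

    // Name, id and state are assigned by the NameNode; the response echoes
    // back the registered policy, which starts out DISABLED.
    AddECPolicyResponse[] responses =
        fs.addErasureCodingPolicies(new ErasureCodingPolicy[]{policy});
    String assignedName = responses[0].getPolicy().getName();

    // Enable it so that setPolicy / file creation can actually use it.
    fs.enableErasureCodingPolicy(assignedName);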
File: HdfsAdmin.java

@@ -17,12 +17,6 @@
 */
 package org.apache.hadoop.hdfs.client;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.EnumSet;
-
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -50,10 +44,16 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
-import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.security.AccessControlException;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.EnumSet;
+
 /**
  * The public API for performing administrative functions on HDFS. Those writing
  * applications against HDFS should prefer this interface to directly accessing
@@ -554,8 +554,9 @@ public class HdfsAdmin {
   /**
    * Add Erasure coding policies to HDFS. For each policy input, schema and
    * cellSize are musts, name and id are ignored. They will be automatically
-   * created and assigned by Namenode once the policy is successfully added, and
-   * will be returned in the response.
+   * created and assigned by Namenode once the policy is successfully added,
+   * and will be returned in the response; policy states will be set to
+   * DISABLED automatically.
    *
    * @param policies The user defined ec policy list to add.
    * @return Return the response list of adding operations.
File: ErasureCodingPolicy.java

@@ -17,8 +17,6 @@
 */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.Serializable;
-
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
@@ -27,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 
+import java.io.Serializable;
+
 /**
  * A policy about how to write/read/code an erasure coding file.
  */
@@ -40,9 +40,11 @@ public final class ErasureCodingPolicy implements Serializable {
   private final ECSchema schema;
   private final int cellSize;
   private byte id;
+  private ErasureCodingPolicyState state;
 
 
   public ErasureCodingPolicy(String name, ECSchema schema,
-      int cellSize, byte id) {
+      int cellSize, byte id, ErasureCodingPolicyState state) {
     Preconditions.checkNotNull(name);
     Preconditions.checkNotNull(schema);
     Preconditions.checkArgument(cellSize > 0, "cellSize must be positive");
@@ -52,14 +54,22 @@ public final class ErasureCodingPolicy implements Serializable {
     this.schema = schema;
     this.cellSize = cellSize;
     this.id = id;
+    this.state = state;
+  }
+
+  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize,
+      byte id) {
+    this(name, schema, cellSize, id, ErasureCodingPolicyState.DISABLED);
   }
 
   public ErasureCodingPolicy(ECSchema schema, int cellSize, byte id) {
-    this(composePolicyName(schema, cellSize), schema, cellSize, id);
+    this(composePolicyName(schema, cellSize), schema, cellSize, id,
+        ErasureCodingPolicyState.DISABLED);
   }
 
   public ErasureCodingPolicy(ECSchema schema, int cellSize) {
-    this(composePolicyName(schema, cellSize), schema, cellSize, (byte) -1);
+    this(composePolicyName(schema, cellSize), schema, cellSize, (byte) -1,
+        ErasureCodingPolicyState.DISABLED);
   }
 
   public static String composePolicyName(ECSchema schema, int cellSize) {
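As the constructor chain above makes explicit, any policy built without an explicit state starts out DISABLED; together with the accessors added further down in this file, that gives a checkable default. A small sketch (the schema values are illustrative):

    ECSchema schema = new ECSchema("xor", 2, 1);   // example schema
    ErasureCodingPolicy p = new ErasureCodingPolicy(schema, 64 * 1024);
    assert p.getState() == ErasureCodingPolicyState.DISABLED;  // default state
    p.setState(ErasureCodingPolicyState.ENABLED);
    assert p.isEnabled();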
@@ -108,10 +118,35 @@ public final class ErasureCodingPolicy implements Serializable {
     this.id = id;
   }
 
   public boolean isReplicationPolicy() {
     return (id == ErasureCodeConstants.REPLICATION_POLICY_ID);
   }
 
+  public ErasureCodingPolicyState getState() {
+    return state;
+  }
+
+  public void setState(ErasureCodingPolicyState state) {
+    this.state = state;
+  }
+
+  public boolean isSystemPolicy() {
+    return (this.id < ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
+  }
+
+  public boolean isEnabled() {
+    return (this.state == ErasureCodingPolicyState.ENABLED);
+  }
+
+  public boolean isDisabled() {
+    return (this.state == ErasureCodingPolicyState.DISABLED);
+  }
+
+  public boolean isRemoved() {
+    return (this.state == ErasureCodingPolicyState.REMOVED);
+  }
+
   @Override
   public boolean equals(Object o) {
     if (o == null) {
@@ -129,6 +164,7 @@ public final class ErasureCodingPolicy implements Serializable {
         .append(schema, rhs.schema)
         .append(cellSize, rhs.cellSize)
         .append(id, rhs.id)
+        .append(state, rhs.state)
         .isEquals();
   }
 
@@ -139,6 +175,7 @@ public final class ErasureCodingPolicy implements Serializable {
         .append(schema)
         .append(cellSize)
         .append(id)
+        .append(state)
         .toHashCode();
   }
 
@@ -147,7 +184,8 @@ public final class ErasureCodingPolicy implements Serializable {
     return "ErasureCodingPolicy=[" + "Name=" + name + ", "
         + "Schema=[" + schema.toString() + "], "
         + "CellSize=" + cellSize + ", "
-        + "Id=" + id
+        + "Id=" + id + ", "
+        + "State=" + state.toString()
         + "]";
   }
 }
File: ErasureCodingPolicyState.java (new file)

@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Value denotes the possible states of an ErasureCodingPolicy.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum ErasureCodingPolicyState {
+
+  /** Policy is disabled. It's policy default state. */
+  DISABLED(1),
+  /** Policy is enabled. It can be applied to directory and file. */
+  ENABLED(2),
+  /**
+   * Policy is removed from the system. Due to there are potential files
+   * use this policy, it cannot be deleted from system immediately. A removed
+   * policy can be re-enabled later.*/
+  REMOVED(3);
+
+  private static final ErasureCodingPolicyState[] CACHED_VALUES =
+      ErasureCodingPolicyState.values();
+
+  private final int value;
+
+  ErasureCodingPolicyState(int v) {
+    value = v;
+  }
+
+  public int getValue() {
+    return value;
+  }
+
+  public static ErasureCodingPolicyState fromValue(int v) {
+    if (v > 0 && v <= CACHED_VALUES.length) {
+      return CACHED_VALUES[v - 1];
+    }
+    return null;
+  }
+
+  /** Read from in. */
+  public static ErasureCodingPolicyState read(DataInput in) throws IOException {
+    return fromValue(in.readByte());
+  }
+
+  /** Write to out. */
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(ordinal());
+  }
+}
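A short sketch of the new enum's numeric mapping: getValue() is the 1-based wire value that fromValue() expects (and that the PBHelperClient conversion below relies on), distinct from Java's 0-based ordinal():

    ErasureCodingPolicyState s = ErasureCodingPolicyState.ENABLED;
    int wire = s.getValue();                                   // 2
    ErasureCodingPolicyState back =
        ErasureCodingPolicyState.fromValue(wire);              // ENABLED
    ErasureCodingPolicyState none =
        ErasureCodingPolicyState.fromValue(0);                 // null: out of range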
File: PBHelperClient.java

@@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -2925,6 +2926,16 @@ public class PBHelperClient {
     return builder.build();
   }
 
+  public static ErasureCodingPolicyState convertECState(
+      HdfsProtos.ErasureCodingPolicyState state) {
+    return ErasureCodingPolicyState.fromValue(state.getNumber());
+  }
+
+  public static HdfsProtos.ErasureCodingPolicyState convertECState(
+      ErasureCodingPolicyState state) {
+    return HdfsProtos.ErasureCodingPolicyState.valueOf(state.getValue());
+  }
+
   public static ErasureCodingPolicy convertErasureCodingPolicy(
       ErasureCodingPolicyProto proto) {
     final byte id = (byte) (proto.getId() & 0xFF);
@@ -2938,10 +2949,12 @@ public class PBHelperClient {
           "Missing schema field in ErasureCodingPolicy proto");
       Preconditions.checkArgument(proto.hasCellSize(),
           "Missing cellsize field in ErasureCodingPolicy proto");
+      Preconditions.checkArgument(proto.hasState(),
+          "Missing state field in ErasureCodingPolicy proto");
 
       return new ErasureCodingPolicy(proto.getName(),
           convertECSchema(proto.getSchema()),
-          proto.getCellSize(), id);
+          proto.getCellSize(), id, convertECState(proto.getState()));
     }
     return policy;
   }
@@ -2955,7 +2968,8 @@ public class PBHelperClient {
     if (SystemErasureCodingPolicies.getByID(policy.getId()) == null) {
       builder.setName(policy.getName())
           .setSchema(convertECSchema(policy.getSchema()))
-          .setCellSize(policy.getCellSize());
+          .setCellSize(policy.getCellSize())
+          .setState(convertECState(policy.getState()));
     }
     return builder.build();
   }
File: hdfs.proto

@@ -373,11 +373,21 @@ message ECSchemaProto {
   repeated ECSchemaOptionEntryProto options = 4;
 }
 
+/**
+ * EC policy state.
+ */
+enum ErasureCodingPolicyState {
+  DISABLED = 1;
+  ENABLED = 2;
+  REMOVED = 3;
+}
+
 message ErasureCodingPolicyProto {
   optional string name = 1;
   optional ECSchemaProto schema = 2;
   optional uint32 cellSize = 3;
   required uint32 id = 4; // Actually a byte - only 8 bits used
+  optional ErasureCodingPolicyState state = 5 [default = ENABLED];
 }
 
 message AddECPolicyResponseProto {
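Because `state` is declared optional with `[default = ENABLED]`, an ErasureCodingPolicyProto serialized before this change (with no state field at all) reads back as ENABLED, matching the old behavior in which every policy a client could see was enabled. A hedged sketch against the generated HdfsProtos classes (method names follow standard protobuf Java codegen):

    HdfsProtos.ErasureCodingPolicyProto proto =
        HdfsProtos.ErasureCodingPolicyProto.newBuilder()
            .setId(1)   // only the required field is set
            .build();
    // hasState() is false, but the declared default still applies:
    assert !proto.hasState();
    assert proto.getState() == HdfsProtos.ErasureCodingPolicyState.ENABLED;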
File: ErasureCodingPolicyManager.java

@@ -17,10 +17,12 @@
 */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -31,11 +33,11 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 /**
  * This manages erasure coding policies predefined and activated in the system.
@@ -60,25 +62,32 @@ public final class ErasureCodingPolicyManager {
       HdfsConstants.ALLSSD_STORAGE_POLICY_ID};
 
   /**
-   * All user defined policies sorted by name for fast querying.
+   * All policies sorted by name for fast querying, include built-in policy,
+   * user defined policy, removed policy.
    */
-  private Map<String, ErasureCodingPolicy> userPoliciesByName;
+  private Map<String, ErasureCodingPolicy> policiesByName;
 
   /**
-   * All user defined policies sorted by ID for fast querying.
+   * All policies sorted by ID for fast querying, including built-in policy,
+   * user defined policy, removed policy.
    */
-  private Map<Byte, ErasureCodingPolicy> userPoliciesByID;
+  private Map<Byte, ErasureCodingPolicy> policiesByID;
 
   /**
-   * All removed policies sorted by name.
+   * For better performance when query all Policies.
    */
-  private Map<String, ErasureCodingPolicy> removedPoliciesByName;
+  private ErasureCodingPolicy[] allPolicies;
 
   /**
-   * All enabled policies maintained in NN memory for fast querying,
-   * identified and sorted by its name.
+   * All enabled policies sorted by name for fast querying, including built-in
+   * policy, user defined policy.
    */
   private Map<String, ErasureCodingPolicy> enabledPoliciesByName;
+  /**
+   * For better performance when query all enabled Policies.
+   */
+  private ErasureCodingPolicy[] enabledPolicies;
+
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -101,46 +110,51 @@ public final class ErasureCodingPolicyManager {
         DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
     final String[] policyNames =
         (String[]) ArrayUtils.add(enablePolicyNames, defaultPolicyName);
-    this.userPoliciesByID = new TreeMap<>();
-    this.userPoliciesByName = new TreeMap<>();
-    this.removedPoliciesByName = new TreeMap<>();
+    this.policiesByName = new TreeMap<>();
+    this.policiesByID = new TreeMap<>();
     this.enabledPoliciesByName = new TreeMap<>();
+
+    /**
+     * TODO: load user defined EC policy from fsImage HDFS-7859
+     * load persistent policies from image and editlog, which is done only once
+     * during NameNode startup. This can be done here or in a separate method.
+     */
+
+    /*
+     * Add all System built-in policies into policy map
+     */
+    for (ErasureCodingPolicy policy :
+        SystemErasureCodingPolicies.getPolicies()) {
+      policiesByName.put(policy.getName(), policy);
+      policiesByID.put(policy.getId(), policy);
+    }
+
     for (String policyName : policyNames) {
       if (policyName.trim().isEmpty()) {
         continue;
       }
-      ErasureCodingPolicy ecPolicy =
-          SystemErasureCodingPolicies.getByName(policyName);
+      ErasureCodingPolicy ecPolicy = policiesByName.get(policyName);
       if (ecPolicy == null) {
-        ecPolicy = userPoliciesByName.get(policyName);
-        if (ecPolicy == null) {
-          String allPolicies = SystemErasureCodingPolicies.getPolicies()
-              .stream().map(ErasureCodingPolicy::getName)
-              .collect(Collectors.joining(", ")) + ", " +
-              userPoliciesByName.values().stream()
-              .map(ErasureCodingPolicy::getName)
-              .collect(Collectors.joining(", "));
-          String msg = String.format("EC policy '%s' specified at %s is not a "
-              + "valid policy. Please choose from list of available "
-              + "policies: [%s]",
-              policyName,
-              DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-              allPolicies);
-          throw new IllegalArgumentException(msg);
-        }
+        String names = policiesByName.values()
+            .stream().map(ErasureCodingPolicy::getName)
+            .collect(Collectors.joining(", "));
+        String msg = String.format("EC policy '%s' specified at %s is not a "
+            + "valid policy. Please choose from list of available "
+            + "policies: [%s]",
+            policyName,
+            DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+            names);
+        throw new IllegalArgumentException(msg);
       }
       enabledPoliciesByName.put(ecPolicy.getName(), ecPolicy);
     }
+    enabledPolicies =
+        enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    allPolicies = policiesByName.values().toArray(new ErasureCodingPolicy[0]);
+
     maxCellSize = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY,
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT);
-
-    /**
-     * TODO: HDFS-7859 persist into NameNode
-     * load persistent policies from image and editlog, which is done only once
-     * during NameNode startup. This can be done here or in a separate method.
-     */
   }
 
   /**
@@ -148,9 +162,7 @@ public final class ErasureCodingPolicyManager {
    * @return all policies
    */
   public ErasureCodingPolicy[] getEnabledPolicies() {
-    ErasureCodingPolicy[] results =
-        new ErasureCodingPolicy[enabledPoliciesByName.size()];
-    return enabledPoliciesByName.values().toArray(results);
+    return enabledPolicies;
   }
 
   /**
@@ -187,9 +199,7 @@ public final class ErasureCodingPolicyManager {
    * @return all policies
    */
   public ErasureCodingPolicy[] getPolicies() {
-    return Stream.concat(SystemErasureCodingPolicies.getPolicies().stream(),
-        userPoliciesByName.values().stream())
-        .toArray(ErasureCodingPolicy[]::new);
+    return allPolicies;
   }
 
   /**
@@ -197,11 +207,7 @@ public final class ErasureCodingPolicyManager {
    * @return ecPolicy, or null if not found
    */
  public ErasureCodingPolicy getByID(byte id) {
-    ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID(id);
-    if (policy == null) {
-      return this.userPoliciesByID.get(id);
-    }
-    return policy;
+    return this.policiesByID.get(id);
  }
 
  /**
@@ -209,11 +215,7 @@ public final class ErasureCodingPolicyManager {
    * @return ecPolicy, or null if not found
    */
  public ErasureCodingPolicy getByName(String name) {
-    ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByName(name);
-    if (policy == null) {
-      return this.userPoliciesByName.get(name);
-    }
-    return policy;
+    return this.policiesByName.get(name);
  }
 
  /**
@@ -230,6 +232,9 @@ public final class ErasureCodingPolicyManager {
    */
   public synchronized ErasureCodingPolicy addPolicy(ErasureCodingPolicy policy)
       throws IllegalECPolicyException {
+    // Set policy state into DISABLED when adding into Hadoop.
+    policy.setState(ErasureCodingPolicyState.DISABLED);
+
     if (!CodecUtil.hasCodec(policy.getCodecName())) {
       throw new IllegalECPolicyException("Codec name "
           + policy.getCodecName() + " is not supported");
@@ -256,15 +261,17 @@ public final class ErasureCodingPolicyManager {
     }
     policy.setName(assignedNewName);
     policy.setId(getNextAvailablePolicyID());
-    this.userPoliciesByName.put(policy.getName(), policy);
-    this.userPoliciesByID.put(policy.getId(), policy);
+    this.policiesByName.put(policy.getName(), policy);
+    this.policiesByID.put(policy.getId(), policy);
+    allPolicies = policiesByName.values().toArray(new ErasureCodingPolicy[0]);
     return policy;
   }
 
   private byte getNextAvailablePolicyID() {
-    byte currentId = this.userPoliciesByID.keySet().stream()
-        .max(Byte::compareTo).orElse(
-            ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
+    byte currentId = this.policiesByID.keySet().stream()
+        .max(Byte::compareTo)
+        .filter(id -> id >= ErasureCodeConstants.USER_DEFINED_POLICY_START_ID)
+        .orElse(ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
     return (byte) (currentId + 1);
   }
 
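The new filter step matters because policiesByID now also contains the built-in system policies: when no user-defined policy exists yet, max() returns a small system id, and without the filter the next id would be computed from it instead of from USER_DEFINED_POLICY_START_ID. A standalone sketch of the same Optional pipeline (the ids and start constant are illustrative):

    final byte START = 64;  // stands in for USER_DEFINED_POLICY_START_ID
    Set<Byte> ids = new TreeSet<>(Arrays.asList((byte) 1, (byte) 4));
    byte currentId = ids.stream()
        .max(Byte::compareTo)            // Optional.of((byte) 4)
        .filter(id -> id >= START)       // empty, since 4 < 64
        .orElse(START);                  // falls back to the start id
    byte next = (byte) (currentId + 1);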
@@ -272,69 +279,71 @@ public final class ErasureCodingPolicyManager {
    * Remove an User erasure coding policy by policyName.
    */
   public synchronized void removePolicy(String name) {
-    if (SystemErasureCodingPolicies.getByName(name) != null) {
-      throw new IllegalArgumentException("System erasure coding policy " +
-          name + " cannot be removed");
-    }
-    ErasureCodingPolicy policy = userPoliciesByName.get(name);
-    if (policy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
-    enabledPoliciesByName.remove(name);
-    removedPoliciesByName.put(name, policy);
+    if (ecPolicy.isSystemPolicy()) {
+      throw new IllegalArgumentException("System erasure coding policy " +
+          name + " cannot be removed");
+    }
+
+    if (enabledPoliciesByName.containsKey(name)) {
+      enabledPoliciesByName.remove(name);
+      enabledPolicies =
+          enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    }
+    ecPolicy.setState(ErasureCodingPolicyState.REMOVED);
+    LOG.info("Remove erasure coding policy " + name);
   }
 
+  @VisibleForTesting
   public List<ErasureCodingPolicy> getRemovedPolicies() {
-    return removedPoliciesByName.values().stream().collect(Collectors.toList());
+    ArrayList<ErasureCodingPolicy> removedPolicies =
+        new ArrayList<ErasureCodingPolicy>();
+    for (ErasureCodingPolicy ecPolicy : policiesByName.values()) {
+      if (ecPolicy.isRemoved()) {
+        removedPolicies.add(ecPolicy);
+      }
+    }
+    return removedPolicies;
   }
 
   /**
    * Disable an erasure coding policy by policyName.
    */
   public synchronized void disablePolicy(String name) {
-    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
-        .getByName(name);
-    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
-    LOG.info("Disable the erasure coding policy " + name);
-    if (sysEcPolicy == null &&
-        userEcPolicy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
-    if(sysEcPolicy != null){
+
+    if (enabledPoliciesByName.containsKey(name)) {
       enabledPoliciesByName.remove(name);
-      removedPoliciesByName.put(name, sysEcPolicy);
-    }
-    if(userEcPolicy != null){
-      enabledPoliciesByName.remove(name);
-      removedPoliciesByName.put(name, userEcPolicy);
-    }
+      enabledPolicies =
+          enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    }
+    ecPolicy.setState(ErasureCodingPolicyState.DISABLED);
+    LOG.info("Disable the erasure coding policy " + name);
   }
 
   /**
    * Enable an erasure coding policy by policyName.
    */
   public synchronized void enablePolicy(String name) {
-    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
-        .getByName(name);
-    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
-    LOG.info("Enable the erasure coding policy " + name);
-    if (sysEcPolicy == null &&
-        userEcPolicy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
-    if(sysEcPolicy != null){
-      enabledPoliciesByName.put(name, sysEcPolicy);
-      removedPoliciesByName.remove(name);
-    }
-    if(userEcPolicy != null) {
-      enabledPoliciesByName.put(name, userEcPolicy);
-      removedPoliciesByName.remove(name);
-    }
+
+    enabledPoliciesByName.put(name, ecPolicy);
+    ecPolicy.setState(ErasureCodingPolicyState.ENABLED);
+    enabledPolicies =
+        enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    LOG.info("Enable the erasure coding policy " + name);
   }
 
 }
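Taken together, these methods now form a small state machine over the single policiesByName map: addPolicy() registers a policy as DISABLED, enablePolicy() moves it to ENABLED (and into the cached enabledPolicies array), disablePolicy() returns it to DISABLED, and removePolicy() marks it REMOVED while keeping it listed, which is what lets -listPolicies show everything. A sketch of the transitions, assuming an initialized manager and an already-registered user-defined policy (the policy name is illustrative):

    ErasureCodingPolicyManager mgr = ErasureCodingPolicyManager.getInstance();
    mgr.enablePolicy("RS-6-3-1024k");   // ENABLED; appears in getEnabledPolicies()
    mgr.disablePolicy("RS-6-3-1024k");  // DISABLED; still in getPolicies()
    mgr.removePolicy("RS-6-3-1024k");   // REMOVED; still in getPolicies()
    // A system policy would throw IllegalArgumentException on removePolicy().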
File: FSDirErasureCodingOp.java

@@ -17,6 +17,23 @@
 */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.erasurecode.CodecRegistry;
+import org.apache.hadoop.security.AccessControlException;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -29,24 +46,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.erasurecode.CodecRegistry;
-import org.apache.hadoop.security.AccessControlException;
-
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY;
 
 /**
@@ -341,7 +340,7 @@ final class FSDirErasureCodingOp {
   static ErasureCodingPolicy[] getErasureCodingPolicies(final FSNamesystem fsn)
       throws IOException {
     assert fsn.hasReadLock();
-    return fsn.getErasureCodingPolicyManager().getEnabledPolicies();
+    return fsn.getErasureCodingPolicyManager().getPolicies();
   }
 
   /**
File: ECAdmin.java

@@ -20,7 +20,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -100,9 +99,7 @@ public class ECAdmin extends Configured implements Tool {
   @Override
   public String getLongUsage() {
     return getShortUsage() + "\n" +
-        "Get the list of enabled erasure coding policies.\n" +
-        "Policies can be enabled on the NameNode via `" +
-        DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY + "`.\n";
+        "Get the list of all erasure coding policies.\n";
   }
 
   @Override
@@ -117,17 +114,13 @@ public class ECAdmin extends Configured implements Tool {
       Collection<ErasureCodingPolicy> policies =
           dfs.getAllErasureCodingPolicies();
       if (policies.isEmpty()) {
-        System.out.println("No erasure coding policies are enabled on the " +
+        System.out.println("There is no erasure coding policies in the " +
             "cluster.");
-        System.out.println("The set of enabled policies can be " +
-            "configured at '" +
-            DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY + "' on the " +
-            "NameNode.");
       } else {
         System.out.println("Erasure Coding Policies:");
         for (ErasureCodingPolicy policy : policies) {
           if (policy != null) {
-            System.out.println("\t" + policy.getName());
+            System.out.println(policy.toString());
           }
         }
       }
File: HDFSErasureCoding.md

@@ -192,7 +192,7 @@ Below are the details about each command.
 
 *  `[-listPolicies]`
 
-     Lists the set of enabled erasure coding policies. These names are suitable for use with the `setPolicy` command.
+     Lists all (enabled, disabled and removed) erasure coding policies registered in HDFS. Only the enabled policies are suitable for use with the `setPolicy` command.
 
 *  `[-addPolicies -policyFile <file>]`
 
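Since ECAdmin now prints each policy via toString(), a line of `hdfs ec -listPolicies` output takes the form below. This is an illustrative line composed from the toString() format added in this commit, with the schema portion elided, not captured output:

    ErasureCodingPolicy=[Name=XOR-2-1-128k, Schema=[...], CellSize=131072, Id=4, State=DISABLED]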
File: TestDistributedFileSystem.java

@@ -1608,8 +1608,6 @@ public class TestDistributedFileSystem {
     assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
         getByName(policyName).getName());
     fs.disableErasureCodingPolicy(policyName);
-    assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
-        getRemovedPolicies().get(0).getName());
     fs.enableErasureCodingPolicy(policyName);
     assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
         getByName(policyName).getName());
File: TestErasureCodingPolicies.java

@@ -211,7 +211,8 @@ public class TestErasureCodingPolicies {
 
     // Only default policy should be enabled after restart
     Assert.assertEquals("Only default policy should be enabled after restart",
-        1, fs.getAllErasureCodingPolicies().size());
+        1,
+        ErasureCodingPolicyManager.getInstance().getEnabledPolicies().length);
 
     // Already set directory-level policies should still be in effect
     Path disabledPolicy = new Path(dir1, "afterDisabled");
@@ -383,6 +384,18 @@ public class TestErasureCodingPolicies {
         .getAllErasureCodingPolicies();
     assertTrue("All system policies should be enabled",
         allECPolicies.containsAll(SystemErasureCodingPolicies.getPolicies()));
+
+    // Query after add a new policy
+    ECSchema toAddSchema = new ECSchema("rs", 5, 2);
+    ErasureCodingPolicy newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+    ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
+    fs.addErasureCodingPolicies(policyArray);
+    allECPolicies = fs.getAllErasureCodingPolicies();
+    assertEquals("Should return new added policy",
+        SystemErasureCodingPolicies.getPolicies().size() + 1,
+        allECPolicies.size());
+
   }
 
   @Test
File: TestStripedINodeFile.java

@@ -17,15 +17,6 @@
 */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
-import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +33,7 @@ import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -50,14 +42,23 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 
+import java.io.IOException;
+
+import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
+import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * This class tests INodeFile with striped feature.
  */
@@ -88,6 +89,12 @@ public class TestStripedINodeFile {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
+  @Before
+  public void init() {
+    Configuration conf = new HdfsConfiguration();
+    ErasureCodingPolicyManager.getInstance().init(conf);
+  }
+
   @Test
   public void testInvalidECPolicy() throws IllegalArgumentException {
     thrown.expect(IllegalArgumentException.class);
File: testErasureCodingConf.xml

@@ -135,7 +135,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Get the list of enabled erasure coding policies</expected-output>
+          <expected-output>Get the list of all erasure coding policies</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
@@ -410,6 +410,25 @@
       </comparators>
     </test>
 
+    <test>
+      <description>listPolicies : get the list of ECPolicies supported</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -listPolicies</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>XOR-2-1-128k</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>State=DISABLED</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
     <test>
       <description>enablePolicy : enable the erasure coding policy</description>
       <test-commands>