HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.

This commit is contained in:
Wei-Chiu Chuang 2017-04-03 07:57:28 -07:00
parent bc7aff7cec
commit bbd68478d5
2 changed files with 57 additions and 4 deletions

View File

@ -85,11 +85,20 @@ class SetReplication extends FsCommand {
} }
if (item.stat.isFile()) { if (item.stat.isFile()) {
if (!item.fs.setReplication(item.path, newRep)) { // Do the checking if the file is erasure coded since
throw new IOException("Could not set replication for: " + item); // replication factor for an EC file is meaningless.
if (!item.stat.isErasureCoded()) {
if (!item.fs.setReplication(item.path, newRep)) {
throw new IOException("Could not set replication for: " + item);
}
out.println("Replication " + newRep + " set: " + item);
if (waitOpt) {
waitList.add(item);
}
} else {
out.println("Did not set replication for: " + item
+ ", because it's an erasure coded file.");
} }
out.println("Replication " + newRep + " set: " + item);
if (waitOpt) waitList.add(item);
} }
} }

View File

@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockLocation;
@ -28,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.junit.Test; import org.junit.Test;
@ -102,4 +105,45 @@ public class TestSetrepIncreasing {
cluster.shutdown(); cluster.shutdown();
} }
} }
@Test
public void testSetRepOnECFile() throws Exception {
  // Verifies that "-setrep" on an erasure coded file is a no-op:
  // the shell reports it did not set replication, exits 0, and the
  // file's replication factor is left unchanged.
  ClientProtocol client;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  cluster.waitActive();
  client = NameNodeProxies.createProxy(conf,
      cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
  // Make "/" an EC directory so files created under it are erasure coded.
  client.setErasureCodingPolicy("/",
      StripedFileTestUtil.getDefaultECPolicy().getName());
  FileSystem dfs = cluster.getFileSystem();
  // Save the real stdout so the JVM-global redirection below can be
  // undone in finally; otherwise later tests in this JVM lose stdout.
  final PrintStream oldOut = System.out;
  try {
    Path d = new Path("/tmp");
    dfs.mkdirs(d);
    Path f = new Path(d, "foo");
    dfs.createNewFile(f);
    FileStatus file = dfs.getFileStatus(f);
    assertTrue(file.isErasureCoded());

    // Capture shell output to assert on the "Did not set replication"
    // message emitted by SetReplication for EC files.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    String[] args = {"-setrep", "2", "" + f};
    FsShell shell = new FsShell();
    shell.setConf(conf);
    // -setrep must succeed (exit code 0) even though it skips the EC file.
    assertEquals(0, shell.run(args));
    assertTrue(
        out.toString().contains("Did not set replication for: /tmp/foo"));

    // verify the replication factor of the EC file is untouched
    file = dfs.getFileStatus(f);
    assertEquals(1, file.getReplication());
  } finally {
    // Restore stdout before tearing down so cluster shutdown logging
    // and subsequent tests see the real stream.
    System.setOut(oldOut);
    dfs.close();
    cluster.shutdown();
  }
}
} }