HDFS-15063. HttpFS: getFileStatus doesn't return ecPolicy. Contributed by hemanthboyina.
parent 62423910a4
commit 074050ca59
@@ -115,7 +115,7 @@ public class JsonUtilClient {
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
-  static HdfsFileStatus toFileStatus(final Map<?, ?> json,
+  public static HdfsFileStatus toFileStatus(final Map<?, ?> json,
       boolean includesType) {
     if (json == null) {
       return null;
@@ -158,11 +158,12 @@ public class JsonUtilClient {
     if (ecPolicyObj != null) {
       Map<String, String> extraOptions = (Map) ecPolicyObj.get("extraOptions");
       ECSchema ecSchema = new ECSchema((String) ecPolicyObj.get("codecName"),
-          (int) ecPolicyObj.get("numDataUnits"),
-          (int) ecPolicyObj.get("numParityUnits"), extraOptions);
+          (int) ((Number) ecPolicyObj.get("numDataUnits")).longValue(),
+          (int) ((Number) ecPolicyObj.get("numParityUnits")).longValue(),
+          extraOptions);
       ecPolicy = new ErasureCodingPolicy((String) ecPolicyObj.get("name"),
-          ecSchema, (int) ecPolicyObj.get("cellSize"),
-          (byte) (int) ecPolicyObj.get("id"));
+          ecSchema, (int) ((Number) ecPolicyObj.get("cellSize")).longValue(),
+          (byte) (int) ((Number) ecPolicyObj.get("id")).longValue());
 
     }
 
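The cast change above guards against parser-dependent boxing: json-simple, which HttpFS uses, returns java.lang.Long for whole JSON numbers, while the Jackson-based WebHDFS path can return Integer, and a direct (int) unbox cast fails at runtime when the box is a Long. Going through Number tolerates both. A minimal standalone sketch of the failure mode and the fix (not part of the patch):

public class NumberCastSketch {
  public static void main(String[] args) {
    // The same JSON field can arrive boxed either way, depending on parser.
    Object asInteger = Integer.valueOf(3); // e.g. a Jackson-parsed value
    Object asLong = Long.valueOf(3L);      // e.g. a json-simple-parsed value

    // int bad = (int) asLong;  // would throw ClassCastException at runtime

    // Widening through Number works for both boxings, as the patch does.
    int a = (int) ((Number) asInteger).longValue();
    int b = (int) ((Number) asLong).longValue();
    System.out.println(a + " " + b); // 3 3
  }
}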
@@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtilClient;
@@ -188,10 +189,12 @@ public class HttpFSFileSystem extends FileSystem
   public static final String FILE_ID_JSON = "fileId";
   public static final String REPLICATION_JSON = "replication";
   public static final String STORAGEPOLICY_JSON = "storagePolicy";
+  public static final String ECPOLICYNAME_JSON = "ecPolicy";
   public static final String XATTRS_JSON = "XAttrs";
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
+  public static final String ECPOLICY_JSON = "ecPolicyObj";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -846,9 +849,8 @@ public class HttpFSFileSystem extends FileSystem
         params, f, true);
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
-    json = (JSONObject) json.get(FILE_STATUS_JSON);
-    f = makeQualified(f);
-    return createFileStatus(f, json);
+    HdfsFileStatus status = JsonUtilClient.toFileStatus(json, true);
+    return status.makeQualified(getUri(), f);
   }
 
   /**
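With getFileStatus delegating to the shared JsonUtilClient.toFileStatus and qualifying the resulting HdfsFileStatus, an HttpFS client observes the same fields as a WebHDFS client, including the erasure coding policy. A minimal client-side sketch mirroring the new test further down; the endpoint, path, and configuration are placeholder assumptions:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class EcPolicyLookupSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint: an HttpFS server with this patch, reached
    // through the webhdfs:// scheme.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://localhost:14000"), new Configuration());
    HdfsFileStatus status =
        (HdfsFileStatus) fs.getFileStatus(new Path("/ec/ec_file.txt"));
    if (status.getErasureCodingPolicy() != null) {
      System.out.println(status.getErasureCodingPolicy().getName());
    }
  }
}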
@@ -130,6 +130,13 @@ public class FSOperations {
           hdfsFileStatus.getFileId());
       json.put(HttpFSFileSystem.STORAGEPOLICY_JSON,
           hdfsFileStatus.getStoragePolicy());
+      if (hdfsFileStatus.getErasureCodingPolicy() != null) {
+        json.put(HttpFSFileSystem.ECPOLICYNAME_JSON,
+            hdfsFileStatus.getErasureCodingPolicy().getName());
+        json.put(HttpFSFileSystem.ECPOLICY_JSON,
+            JsonUtil.getEcPolicyAsMap(
+                hdfsFileStatus.getErasureCodingPolicy()));
+      }
     }
     if (fileStatus.getPermission().getAclBit()) {
       json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
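For an erasure-coded file, the FileStatus JSON built above now carries two extra keys: "ecPolicy" (the policy name) and "ecPolicyObj" (the map produced by JsonUtil.getEcPolicyAsMap, whose name, cellSize, id, codecName, numDataUnits, numParityUnits, and extraOptions entries are read back by JsonUtilClient.toFileStatus in the first hunks). A short json-simple sketch of consuming the response; responseBody is assumed to be the body of a GETFILESTATUS call against a patched server:

import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

public class EcStatusJsonSketch {
  static void printEcPolicy(String responseBody) throws Exception {
    JSONObject root = (JSONObject) new JSONParser().parse(responseBody);
    JSONObject status = (JSONObject) root.get("FileStatus");
    // Both keys are present only when the file has an EC policy set.
    String ecPolicyName = (String) status.get("ecPolicy");
    JSONObject ecPolicyObj = (JSONObject) status.get("ecPolicyObj");
    System.out.println(ecPolicyName + " -> " + ecPolicyObj);
  }
}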
@@ -17,11 +17,18 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
@@ -42,6 +49,7 @@ import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.Writer;
 import java.net.HttpURLConnection;
+import java.net.URI;
 import java.net.URL;
 import java.nio.charset.Charset;
 import java.text.MessageFormat;
@@ -65,6 +73,7 @@ import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Groups;
@@ -1688,4 +1697,45 @@ public class TestHttpFSServer extends HFSTestCase {
     sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=-w-");
     Assert.assertEquals(HttpURLConnection.HTTP_OK, conn1.getResponseCode());
   }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testECPolicy() throws Exception {
+    createHttpFSServer(false, false);
+    final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
+        .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+    final String ecPolicyName = ecPolicy.getName();
+    // Create an EC dir and write a test file in it
+    final Path ecDir = new Path("/ec");
+
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+        .get(ecDir.toUri(), TestHdfsHelper.getHdfsConf());
+    Path ecFile = new Path(ecDir, "ec_file.txt");
+    dfs.mkdirs(ecDir);
+    dfs.enableErasureCodingPolicy(ecPolicyName);
+    dfs.setErasureCodingPolicy(ecDir, ecPolicyName);
+    // Create an EC file
+    DFSTestUtil.createFile(dfs, ecFile, 1024, (short) 1, 0);
+
+    // Verify that ecPolicy is set in getFileStatus response for ecFile
+    String getFileStatusResponse =
+        getStatus(ecFile.toString(), "GETFILESTATUS");
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(getFileStatusResponse);
+    JSONObject details = (JSONObject) jsonObject.get("FileStatus");
+    String ecpolicyForECfile = (String) details.get("ecPolicy");
+    assertEquals("EC policy for ecFile should match the set EC policy",
+        ecPolicyName, ecpolicyForECfile);
+
+    // Verify HttpFS getFileStatus via the WebHDFS REST API
+    WebHdfsFileSystem httpfsWebHdfs = (WebHdfsFileSystem) FileSystem.get(
+        new URI("webhdfs://"
+            + TestJettyHelper.getJettyURL().toURI().getAuthority()),
+        TestHdfsHelper.getHdfsConf());
+    HdfsFileStatus httpfsFileStatus =
+        (HdfsFileStatus) httpfsWebHdfs.getFileStatus(ecFile);
+    assertNotNull(httpfsFileStatus.getErasureCodingPolicy());
+  }
 }
@@ -157,7 +157,7 @@ public class JsonUtil {
     return m;
   }
 
-  private static Map<String, Object> getEcPolicyAsMap(
+  public static Map<String, Object> getEcPolicyAsMap(
       final ErasureCodingPolicy ecPolicy) {
     /** Convert an ErasureCodingPolicy to a map. */
     ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();