diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 2e7372b1620..fbc43246ff1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.web;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
@@ -589,35 +588,4 @@ class JsonUtilClient {
         lastLocatedBlock, isLastBlockComplete, null);
   }
 
-  /** Convert a Json map to BlockLocation. **/
-  static BlockLocation toBlockLocation(Map<?, ?> m)
-      throws IOException{
-    long length = ((Number) m.get("length")).longValue();
-    long offset = ((Number) m.get("offset")).longValue();
-    boolean corrupt = Boolean.
-        getBoolean(m.get("corrupt").toString());
-    String[] storageIds = toStringArray(getList(m, "storageIds"));
-    String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
-    String[] hosts = toStringArray(getList(m, "hosts"));
-    String[] names = toStringArray(getList(m, "names"));
-    String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
-    StorageType[] storageTypes = toStorageTypeArray(
-        getList(m, "storageTypes"));
-    return new BlockLocation(names, hosts, cachedHosts,
-        topologyPaths, storageIds, storageTypes,
-        offset, length, corrupt);
-  }
-
-  static String[] toStringArray(List<?> list) {
-    if (list == null) {
-      return null;
-    } else {
-      final String[] array = new String[list.size()];
-      int i = 0;
-      for (Object object : list) {
-        array[i++] = object.toString();
-      }
-      return array;
-    }
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index cd7ca749bf5..c0d6de92ccd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1597,20 +1597,13 @@ public class WebHdfsFileSystem extends FileSystem
     statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
 
-    final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
+    final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
     return new FsPathResponseRunner<BlockLocation[]>(op, p,
         new OffsetParam(offset), new LengthParam(length)) {
       @Override
-      @SuppressWarnings("unchecked")
       BlockLocation[] decodeResponse(Map<?, ?> json) throws IOException {
-        List<?> list = JsonUtilClient.getList(json, "BlockLocations");
-        BlockLocation[] locations = new BlockLocation[list.size()];
-        for(int i=0; i<locations.length; i++) {
-          BlockLocation bl = JsonUtilClient.
-              toBlockLocation((Map<String, Object>) list.get(i));
-          locations[i] = bl;
-        }
-        return locations;
+        return DFSUtilClient.locatedBlocks2Locations(
+            JsonUtilClient.toLocatedBlocks(json));
       }
     }.run();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index ccb0bb3dcec..635e6d7e8ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -33,18 +33,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
     GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
 
-    /**
-     * GET_BLOCK_LOCATIONS is a private/stable API op. It returns a
-     * {@link org.apache.hadoop.hdfs.protocol.LocatedBlocks}
-     * json object.
-     */
+    /** GET_BLOCK_LOCATIONS is a private unstable op. */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
-    /**
-     * GETFILEBLOCKLOCATIONS is the public op that complies with
-     * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations}
-     * interface.
-     */
-    GETFILEBLOCKLOCATIONS(false, HttpURLConnection.HTTP_OK),
     GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     GETTRASHROOT(false, HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 91c98991882..80bff881771 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -54,7 +54,6 @@ import javax.ws.rs.core.StreamingOutput;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -924,22 +923,6 @@ public class NamenodeWebHdfsMethods {
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     }
-    case GETFILEBLOCKLOCATIONS:
-    {
-      final long offsetValue = offset.getValue();
-      final Long lengthValue = length.getValue();
-
-      try (final FileSystem fs = FileSystem.get(conf != null ?
-              conf : new Configuration())) {
-        BlockLocation[] locations = fs.getFileBlockLocations(
-            new org.apache.hadoop.fs.Path(fullpath),
-            offsetValue,
-            lengthValue != null? lengthValue: Long.MAX_VALUE);
-        final String js = JsonUtil.toJsonString("BlockLocations",
-            JsonUtil.toJsonArray(locations));
-        return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-      }
-    }
     case GET_BLOCK_LOCATIONS:
     {
       final long offsetValue = offset.getValue();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 43fa1327d10..ac9ab77dd6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -436,34 +436,4 @@ public class JsonUtil {
     return MAPPER.writeValueAsString(obj);
   }
 
-  public static Object[] toJsonArray(BlockLocation[] locations)
-      throws IOException {
-    if(locations == null) {
-      return null;
-    }
-    Object[] blockLocations = new Object[locations.length];
-    for(int i=0; i<locations.length; i++) {
-      blockLocations[i] = toJsonMap(locations[i]);
-    }
-    return blockLocations;
-  }
-
-  public static Map<String, Object> toJsonMap(
-      final BlockLocation blockLocation) throws IOException {
-    if (blockLocation == null) {
-      return null;
-    }
-
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("length", blockLocation.getLength());
-    m.put("offset", blockLocation.getOffset());
-    m.put("corrupt", blockLocation.isCorrupt());
-    m.put("storageTypes", toJsonArray(blockLocation.getStorageTypes()));
-    m.put("storageIds", blockLocation.getStorageIds());
-    m.put("cachedHosts", blockLocation.getCachedHosts());
-    m.put("hosts", blockLocation.getHosts());
-    m.put("names", blockLocation.getNames());
-    m.put("topologyPaths", blockLocation.getTopologyPaths());
-    return m;
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index bee978af517..7ac928ec414 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -37,7 +37,6 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
-import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.io.IOUtils;
@@ -82,8 +81,6 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.type.MapType;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Assert;
@@ -794,46 +791,6 @@ public class TestWebHDFS {
       Assert.assertTrue(storageTypes != null && storageTypes.length > 0
           && storageTypes[0] == StorageType.DISK);
     }
-
-    // Query webhdfs REST API to get block locations
-    InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
-    URL url = new URL("http", addr.getHostString(), addr.getPort(),
-        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS");
-    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url);
-
-    String response = getResponse(url, "GET");
-    LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response);
-    // Expected output from rest API
-    // { "BlockLoactions" : [{Block_Loation_Json}, ...] }
-    ObjectMapper mapper = new ObjectMapper();
-    MapType jsonType = mapper.getTypeFactory().constructMapType(
-        Map.class,
-        String.class,
-        BlockLocation[].class);
-    Map<String, BlockLocation[]> jsonMap = mapper.readValue(response,
-        jsonType);
-    BlockLocation[] array = jsonMap.get("BlockLocations");
-
-    for(int i=0; i<locations.length; i++) {
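
Reviewer note: after this revert, clients obtain block locations through the public FileSystem#getFileBlockLocations API; under the hood, WebHdfsFileSystem now issues the private GET_BLOCK_LOCATIONS op and converts the returned LocatedBlocks json on the client side via JsonUtilClient.toLocatedBlocks and DFSUtilClient.locatedBlocks2Locations, as the WebHdfsFileSystem hunk above shows. A minimal caller-side sketch follows; the NameNode address, port, and file path are placeholders, not values taken from this patch:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsBlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode HTTP endpoint; substitute your cluster's address.
    URI webhdfs = URI.create("webhdfs://namenode.example.com:9870");
    try (FileSystem fs = FileSystem.get(webhdfs, new Configuration())) {
      // With this patch applied, the call below maps to op=GET_BLOCK_LOCATIONS
      // on the wire rather than the removed GETFILEBLOCKLOCATIONS op.
      BlockLocation[] locations =
          fs.getFileBlockLocations(new Path("/foo"), 0L, Long.MAX_VALUE);
      for (BlockLocation loc : locations) {
        // Print each block's byte range and the hosts serving it.
        System.out.println(loc.getOffset() + "+" + loc.getLength() + ": "
            + String.join(",", loc.getHosts()));
      }
    }
  }
}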