HADOOP-10882. Move DirectBufferPool into common util. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1613005 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2014-07-24 06:21:52 +00:00
parent 087a2acb8b
commit 8f8a933ca9
6 changed files with 11 additions and 7 deletions

@@ -47,6 +47,8 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10887. Add XAttrs to ViewFs and make XAttrs + ViewFileSystem
     internal dir behavior consistent. (Stephen Chu via wang)
 
+    HADOOP-10882. Move DirectBufferPool into common util. (todd)
+
   OPTIMIZATIONS
 
   BUG FIXES

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
 
 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
@@ -27,6 +27,7 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * A simple class for pooling direct ByteBuffers. This is necessary
@@ -40,7 +41,8 @@ import com.google.common.annotations.VisibleForTesting;
  * allocated at the same size. There is no attempt to reuse larger
  * buffers to satisfy smaller allocations.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
 public class DirectBufferPool {
 
   // Essentially implement a multimap with weak values.

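Since the class's call sites are what this commit rewires, a minimal usage sketch may help readers auditing the new package. It relies only on the pool's public getBuffer(int)/returnBuffer(ByteBuffer) methods; the class and method names in the sketch itself are illustrative, not part of this commit:

    import java.nio.ByteBuffer;

    import org.apache.hadoop.util.DirectBufferPool;

    public class PoolUsageExample {
      private static final DirectBufferPool POOL = new DirectBufferPool();

      static void fillAndRelease(byte[] data) {
        // Buffers are pooled per exact size: this either reuses a previously
        // returned buffer of data.length bytes or allocates a fresh one.
        ByteBuffer buf = POOL.getBuffer(data.length);
        try {
          buf.put(data);
          buf.flip();
          // ... hand buf to I/O that requires direct memory ...
        } finally {
          // Make the buffer available for reuse. The pool keeps only a weak
          // reference, so unused pooled buffers can still be collected.
          POOL.returnBuffer(buf);
        }
      }
    }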
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotSame;
@@ -29,7 +29,7 @@ import org.junit.Test;
 import com.google.common.collect.Lists;
 
 public class TestDirectBufferPool {
-  final DirectBufferPool pool = new DirectBufferPool();
+  final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool();
 
   @Test
   public void testBasics() {

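The moved test keeps its testBasics coverage. For a feel of the contract the pool is expected to honor, here is a hedged sketch in the spirit of that test (assertion flow reconstructed, not the verbatim test body; the class and method names are illustrative):

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertNotSame;
    import static org.junit.Assert.assertSame;
    import static org.junit.Assert.assertTrue;

    import java.nio.ByteBuffer;

    import org.apache.hadoop.util.DirectBufferPool;
    import org.junit.Test;

    public class PoolContractSketch {
      private final DirectBufferPool pool = new DirectBufferPool();

      @Test
      public void reusesReturnedBuffersOfTheSameSize() {
        ByteBuffer a = pool.getBuffer(100);
        assertEquals(100, a.capacity());
        assertTrue(a.isDirect());  // the pool hands out direct buffers

        // While 'a' is checked out, a same-size request must allocate anew.
        ByteBuffer b = pool.getBuffer(100);
        assertNotSame(a, b);

        // After return, the next same-size request reuses the pooled buffer
        // ('a' is still strongly referenced here, so its weak ref is intact).
        pool.returnBuffer(a);
        assertSame(a, pool.getBuffer(100));
      }
    }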
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.DataChecksum;
 
 import com.google.common.annotations.VisibleForTesting;

@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -27,7 +27,7 @@ import java.nio.channels.ReadableByteChannel;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;