HDFS-6040. fix DFSClient issue without libhadoop.so and some other ShortCircuitShm cleanups (cmccabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1573885 13f79535-47bb-0310-9956-ffa450edef68
parent b44dfd6725, commit c1db6bf511
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -81,6 +81,10 @@ public final class DomainSocketWatcher implements Closeable {
    */
   private static native void anchorNative();
 
+  public static String getLoadingFailureReason() {
+    return loadingFailureReason;
+  }
+
   public interface Handler {
     /**
      * Handles an event on a socket.  An event may be the socket becoming
@@ -244,7 +248,9 @@ public final class DomainSocketWatcher implements Closeable {
     lock.lock();
     try {
       if (closed) return;
-      LOG.info(this + ": closing");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(this + ": closing");
+      }
       closed = true;
     } finally {
       lock.unlock();
@@ -390,8 +396,10 @@ public final class DomainSocketWatcher implements Closeable {
   final Thread watcherThread = new Thread(new Runnable() {
     @Override
     public void run() {
-      LOG.info(this + ": starting with interruptCheckPeriodMs = " +
-          interruptCheckPeriodMs);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(this + ": starting with interruptCheckPeriodMs = " +
+            interruptCheckPeriodMs);
+      }
       final TreeMap<Integer, Entry> entries = new TreeMap<Integer, Entry>();
       FdSet fdSet = new FdSet();
       addNotificationSocket(entries, fdSet);
@@ -431,7 +439,9 @@ public final class DomainSocketWatcher implements Closeable {
           // toRemove are now empty and processedCond has been notified if it
           // needed to be.
           if (closed) {
-            LOG.info(toString() + " thread terminating.");
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(toString() + " thread terminating.");
+            }
             return;
           }
           // Check if someone sent our thread an InterruptedException while we
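The new getLoadingFailureReason() accessor exposes the result of DomainSocketWatcher's static native-library check, so callers can detect up front that libhadoop.so is missing instead of failing later with an UnsatisfiedLinkError. A minimal sketch of the pattern, with a hypothetical class name (the real field and check live in DomainSocketWatcher's static initializer):

public final class NativeProbe {
  // Null means the native library loaded successfully.
  private static final String LOADING_FAILURE_REASON;

  static {
    String reason = null;
    try {
      System.loadLibrary("hadoop");  // throws UnsatisfiedLinkError if absent
    } catch (UnsatisfiedLinkError e) {
      reason = "libhadoop cannot be loaded: " + e.getMessage();
    }
    LOADING_FAILURE_REASON = reason;
  }

  public static String getLoadingFailureReason() {
    return LOADING_FAILURE_REASON;
  }

  public static void main(String[] args) {
    String reason = getLoadingFailureReason();
    System.out.println(reason == null ? "native OK" : "degraded: " + reason);
  }
}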
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -273,6 +273,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5866. '-maxSize' and '-step' option fail in OfflineImageViewer.
     (Akira Ajisaka via wheat9)
 
+    HDFS-6040. fix DFSClient issue without libhadoop.so and some other
+    ShortCircuitShm cleanups (cmccabe)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -165,13 +165,13 @@
       <Bug pattern="DM_STRING_CTOR" />
     </Match>
     <Match>
-      <Class name="org.apache.hadoop.hdfs.client.ClientMmapManager" />
-      <Method name="create" />
+      <Class name="org.apache.hadoop.hdfs.client.DfsClientShmManager$EndpointShmManager" />
+      <Method name="allocSlot" />
       <Bug pattern="UL_UNRELEASED_LOCK_EXCEPTION_PATH" />
     </Match>
     <Match>
-      <Class name="org.apache.hadoop.hdfs.client.ClientMmapManager" />
-      <Method name="create" />
+      <Class name="org.apache.hadoop.hdfs.client.DfsClientShmManager$EndpointShmManager" />
+      <Method name="allocSlot" />
       <Bug pattern="UL_UNRELEASED_LOCK" />
     </Match>
     <!-- Manually verified to be okay, we want to throw away the top bit here -->
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ShortCircuitShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ShortCircuitShm.java
@@ -30,7 +30,6 @@ import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.InvalidRequestException;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
 import org.apache.hadoop.util.Shell;
@@ -514,7 +513,9 @@ public class ShortCircuitShm {
    * @return          The base address of the slot.
    */
   private final long calculateSlotAddress(int slotIdx) {
-    return this.baseAddress + (slotIdx * BYTES_PER_SLOT);
+    long offset = slotIdx;
+    offset *= BYTES_PER_SLOT;
+    return this.baseAddress + offset;
   }
 
   /**
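The calculateSlotAddress() change fixes a potential integer overflow: slotIdx * BYTES_PER_SLOT was evaluated in 32-bit int arithmetic and only then widened for the addition, so a sufficiently large slot index would wrap negative. Assigning slotIdx to a long first forces the multiply into 64-bit arithmetic. A standalone demonstration (the constant value here is illustrative, not taken from the source):

public class SlotAddressOverflow {
  private static final int BYTES_PER_SLOT = 64;  // illustrative value

  public static void main(String[] args) {
    int slotIdx = 50000000;      // 50M * 64 exceeds Integer.MAX_VALUE
    long baseAddress = 1L << 40;

    // Old form: the multiply wraps around in int math before widening.
    long wrong = baseAddress + (slotIdx * BYTES_PER_SLOT);

    // New form: widen first, then multiply in 64-bit arithmetic.
    long offset = slotIdx;
    offset *= BYTES_PER_SLOT;
    long right = baseAddress + offset;

    System.out.println("int math:  " + wrong);   // wrapped, too small
    System.out.println("long math: " + right);
  }
}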
@@ -536,7 +537,6 @@ public class ShortCircuitShm {
     slot.makeValid();
     slots[idx] = slot;
     if (LOG.isTraceEnabled()) {
-      //LOG.trace(this + ": allocAndRegisterSlot " + idx);
       LOG.trace(this + ": allocAndRegisterSlot " + idx + ": allocatedSlots=" + allocatedSlots +
           StringUtils.getStackTrace(Thread.currentThread()));
     }
@@ -567,6 +567,14 @@ public class ShortCircuitShm {
    */
   synchronized public final Slot registerSlot(int slotIdx,
       ExtendedBlockId blockId) throws InvalidRequestException {
+    if (slotIdx < 0) {
+      throw new InvalidRequestException(this + ": invalid negative slot " +
+          "index " + slotIdx);
+    }
+    if (slotIdx >= slots.length) {
+      throw new InvalidRequestException(this + ": invalid slot " +
+          "index " + slotIdx);
+    }
     if (allocatedSlots.get(slotIdx)) {
       throw new InvalidRequestException(this + ": slot " + slotIdx +
           " is already in use.");
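The added range checks matter because registerSlot() acts on a slot index received from the other side of the short-circuit protocol; a negative or too-large value previously surfaced as an unchecked IndexOutOfBoundsException from allocatedSlots.get() or slots[slotIdx]. Converting it to InvalidRequestException keeps a malformed request from looking like an internal bug. The idiom, sketched standalone with plain IOException standing in for InvalidRequestException (names are illustrative):

import java.io.IOException;

public class SlotIndexCheck {
  // Translate an out-of-range index from a remote peer into a checked,
  // request-scoped exception instead of an unchecked runtime error.
  static void checkSlotIndex(int slotIdx, int capacity) throws IOException {
    if (slotIdx < 0 || slotIdx >= capacity) {
      throw new IOException("invalid slot index " + slotIdx +
          " (capacity " + capacity + ")");
    }
  }

  public static void main(String[] args) throws IOException {
    checkSlotIndex(3, 128);    // fine
    checkSlotIndex(-1, 128);   // throws with a descriptive message
  }
}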
@@ -579,7 +587,6 @@ public class ShortCircuitShm {
     slots[slotIdx] = slot;
     allocatedSlots.set(slotIdx, true);
     if (LOG.isTraceEnabled()) {
-      //LOG.trace(this + ": registerSlot " + slotIdx);
       LOG.trace(this + ": registerSlot " + slotIdx + ": allocatedSlots=" + allocatedSlots +
           StringUtils.getStackTrace(Thread.currentThread()));
     }
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ShortCircuitCache.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.net.unix.DomainSocketWatcher;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -375,7 +376,8 @@ public class ShortCircuitCache implements Closeable {
     this.mmapRetryTimeoutMs = mmapRetryTimeoutMs;
     this.staleThresholdMs = staleThresholdMs;
     DfsClientShmManager shmManager = null;
-    if (shmInterruptCheckMs > 0) {
+    if ((shmInterruptCheckMs > 0) &&
+        (DomainSocketWatcher.getLoadingFailureReason() == null)) {
       try {
         shmManager = new DfsClientShmManager(shmInterruptCheckMs);
       } catch (IOException e) {
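This hunk is the core of the DFSClient fix named in the commit title: the shared-memory slot manager depends on DomainSocketWatcher's native support, so it is now constructed only when the native library actually loaded; otherwise shmManager stays null and short-circuit reads proceed without shared-memory slots. A compact sketch of the guarded-construction idiom (hypothetical types, not the Hadoop source):

final class ShmManagerFactory {
  interface ShmManager { }

  // Build the native-backed component only when the feature is enabled and
  // the prerequisite library loaded; null signals "feature unavailable".
  static ShmManager createIfSupported(long interruptCheckMs,
      String nativeLoadFailure) {
    if ((interruptCheckMs > 0) && (nativeLoadFailure == null)) {
      return new ShmManager() { };  // stands in for new DfsClientShmManager(...)
    }
    return null;  // callers null-check and skip slot allocation
  }

  public static void main(String[] args) {
    ShmManager m = createIfSupported(60000, "libhadoop.so not found");
    System.out.println(m == null ? "shared memory disabled" : "enabled");
  }
}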
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/TestShortCircuitShm.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
+import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.hdfs.ShortCircuitShm;
+import org.apache.hadoop.hdfs.ShortCircuitShm.ShmId;
+import org.apache.hadoop.hdfs.ShortCircuitShm.Slot;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.Assert;
+
+public class TestShortCircuitShm {
+  public static final Log LOG = LogFactory.getLog(TestShortCircuitShm.class);
+
+  private static final File TEST_BASE =
+      new File(System.getProperty("test.build.data", "/tmp"));
+
+  @Before
+  public void before() {
+    Assume.assumeTrue(NativeIO.isAvailable());
+    Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
+  }
+
+  @Test(timeout=60000)
+  public void testStartupShutdown() throws Exception {
+    File path = new File(TEST_BASE, "testStartupShutdown");
+    path.mkdirs();
+    SharedFileDescriptorFactory factory =
+        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
+    FileInputStream stream =
+        factory.createDescriptor("testStartupShutdown", 4096);
+    ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
+    shm.free();
+    stream.close();
+    FileUtil.fullyDelete(path);
+  }
+
+  @Test(timeout=60000)
+  public void testAllocateSlots() throws Exception {
+    File path = new File(TEST_BASE, "testAllocateSlots");
+    path.mkdirs();
+    SharedFileDescriptorFactory factory =
+        new SharedFileDescriptorFactory("shm_", path.getAbsolutePath());
+    FileInputStream stream =
+        factory.createDescriptor("testAllocateSlots", 4096);
+    ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
+    int numSlots = 0;
+    ArrayList<Slot> slots = new ArrayList<Slot>();
+    while (!shm.isFull()) {
+      Slot slot = shm.allocAndRegisterSlot(new ExtendedBlockId(123L, "test_bp1"));
+      slots.add(slot);
+      numSlots++;
+    }
+    LOG.info("allocated " + numSlots + " slots before running out.");
+    int slotIdx = 0;
+    for (Iterator<Slot> iter = shm.slotIterator();
+        iter.hasNext(); ) {
+      Assert.assertTrue(slots.contains(iter.next()));
+    }
+    for (Slot slot : slots) {
+      Assert.assertFalse(slot.addAnchor());
+      Assert.assertEquals(slotIdx++, slot.getSlotIdx());
+    }
+    for (Slot slot : slots) {
+      slot.makeAnchorable();
+    }
+    for (Slot slot : slots) {
+      Assert.assertTrue(slot.addAnchor());
+    }
+    for (Slot slot : slots) {
+      slot.removeAnchor();
+    }
+    for (Slot slot : slots) {
+      shm.unregisterSlot(slot.getSlotIdx());
+      slot.makeInvalid();
+    }
+    shm.free();
+    stream.close();
+    FileUtil.fullyDelete(path);
+  }
+}
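The new test guards itself with JUnit's Assume in the @Before hook, so on platforms without native IO support, or on non-Unix systems, the tests report as skipped rather than failed. It can be run in isolation with the standard Surefire single-test flag, e.g. mvn test -Dtest=TestShortCircuitShm from the hadoop-hdfs module. A standalone illustration of the Assume idiom (hypothetical class, unrelated to the Hadoop source):

import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;

public class AssumeIdiomExample {
  @Before
  public void before() {
    // When the assumption fails, every test in the class is marked
    // skipped instead of failed.
    Assume.assumeTrue(System.getProperty("os.name").contains("Linux"));
  }

  @Test
  public void runsOnlyOnLinux() {
    // platform-specific assertions would go here
  }
}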