HADOOP-12973. Make DU pluggable. (Elliott Clark via cmccabe)
(cherry picked from commit 35f0770555)
(cherry picked from commit 2b0b332e2f)

Conflicts:
    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java

parent b804b20843
commit 59b0661c7f
@@ -0,0 +1,168 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Interface for a class that can estimate how much space
 * is used in a directory.
 * <p>
 * The implementor is free to cache the space used. As such there
 * are methods to update the cached value with any known changes.
 */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
  static final Logger LOG = LoggerFactory.getLogger(CachingGetSpaceUsed.class);

  protected final AtomicLong used = new AtomicLong();
  private final AtomicBoolean running = new AtomicBoolean(true);
  private final long refreshInterval;
  private final String dirPath;
  private Thread refreshUsed;

  /**
   * This is the constructor used by the builder.
   * All overriding classes should implement this.
   */
  public CachingGetSpaceUsed(CachingGetSpaceUsed.Builder builder)
      throws IOException {
    this(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
  }

  /**
   * Keeps track of disk usage.
   *
   * @param path the path to check disk usage in
   * @param interval refresh the disk usage at this interval
   * @param initialUsed use this value until next refresh
   * @throws IOException if we fail to refresh the disk usage
   */
  CachingGetSpaceUsed(File path,
                      long interval,
                      long initialUsed) throws IOException {
    dirPath = path.getCanonicalPath();
    refreshInterval = interval;
    used.set(initialUsed);
  }

  void init() {
    if (used.get() < 0) {
      used.set(0);
      refresh();
    }

    if (refreshInterval > 0) {
      refreshUsed = new Thread(new RefreshThread(this),
          "refreshUsed-" + dirPath);
      refreshUsed.setDaemon(true);
      refreshUsed.start();
    } else {
      running.set(false);
      refreshUsed = null;
    }
  }

  protected abstract void refresh();

  /**
   * @return an estimate of space used in the directory path.
   */
  @Override public long getUsed() throws IOException {
    return Math.max(used.get(), 0);
  }

  /**
   * @return the directory path being monitored.
   */
  public String getDirPath() {
    return dirPath;
  }

  /**
   * Increment the cached value of used space.
   */
  public void incDfsUsed(long value) {
    used.addAndGet(value);
  }

  /**
   * Is the background thread running?
   */
  boolean running() {
    return running.get();
  }

  /**
   * How long between runs of the background refresh.
   */
  long getRefreshInterval() {
    return refreshInterval;
  }

  /**
   * Reset the current used data amount. This should be called
   * when the cached value is re-computed.
   *
   * @param usedValue new value that should be the disk usage.
   */
  protected void setUsed(long usedValue) {
    this.used.set(usedValue);
  }

  @Override
  public void close() throws IOException {
    running.set(false);
    if (refreshUsed != null) {
      refreshUsed.interrupt();
    }
  }

  private static final class RefreshThread implements Runnable {

    final CachingGetSpaceUsed spaceUsed;

    RefreshThread(CachingGetSpaceUsed spaceUsed) {
      this.spaceUsed = spaceUsed;
    }

    @Override
    public void run() {
      while (spaceUsed.running()) {
        try {
          Thread.sleep(spaceUsed.getRefreshInterval());
          // update the used variable
          spaceUsed.refresh();
        } catch (InterruptedException e) {
          LOG.warn("Thread Interrupted waiting to refresh disk information", e);
          Thread.currentThread().interrupt();
        }
      }
    }
  }
}
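The class above is the commit's new extension point: a pluggable implementation only needs a constructor that takes the builder and a refresh() override; caching, the background thread, and close() all come from the base class. A minimal sketch follows — the class name FreeSpaceGetSpaceUsed and its partition-based estimate are illustrative assumptions, not part of this commit.

package org.apache.hadoop.fs;

import java.io.File;
import java.io.IOException;

// Illustrative subclass: estimates usage as partition capacity minus
// free space, a cheap but coarse alternative to shelling out to 'du'.
public class FreeSpaceGetSpaceUsed extends CachingGetSpaceUsed {

  private final File path;

  // GetSpaceUsed.Builder#build() reflectively looks for a constructor
  // with exactly this shape.
  public FreeSpaceGetSpaceUsed(CachingGetSpaceUsed.Builder builder)
      throws IOException {
    super(builder);
    this.path = builder.getPath();
  }

  @Override
  protected void refresh() {
    // Coarse estimate: reflects the whole partition, not just the directory.
    setUsed(path.getTotalSpace() - path.getFreeSpace());
  }
}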
@@ -17,213 +17,56 @@
 */
package org.apache.hadoop.fs;

import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Shell;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
-import java.util.concurrent.atomic.AtomicLong;

-/** Filesystem disk space usage statistics. Uses the unix 'du' program*/
+/** Filesystem disk space usage statistics. Uses the unix 'du' program */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
-public class DU extends Shell {
-  private String dirPath;
+public class DU extends CachingGetSpaceUsed {
+  private DUShell duShell;

-  private AtomicLong used = new AtomicLong();
-  private volatile boolean shouldRun = true;
-  private Thread refreshUsed;
-  private IOException duException = null;
-  private long refreshInterval;
-
-  /**
-   * Keeps track of disk usage.
-   * @param path the path to check disk usage in
-   * @param interval refresh the disk usage at this interval
-   * @throws IOException if we fail to refresh the disk usage
-   */
-  public DU(File path, long interval) throws IOException {
-    this(path, interval, -1L);
-  }
-
  /**
   * Keeps track of disk usage.
   * @param path the path to check disk usage in
   * @param interval refresh the disk usage at this interval
   * @param initialUsed use this value until next refresh
   * @throws IOException if we fail to refresh the disk usage
   */
  @VisibleForTesting
  public DU(File path, long interval, long initialUsed) throws IOException {
-    super(0);
-
-    //we set the Shell interval to 0 so it will always run our command
-    //and use this one to set the thread sleep interval
-    this.refreshInterval = interval;
-    this.dirPath = path.getCanonicalPath();
-
-    //populate the used variable if the initial value is not specified.
-    if (initialUsed < 0) {
-      run();
-    } else {
-      this.used.set(initialUsed);
-    }
+    super(path, interval, initialUsed);
  }

-  /**
-   * Keeps track of disk usage.
-   * @param path the path to check disk usage in
-   * @param conf configuration object
-   * @throws IOException if we fail to refresh the disk usage
-   */
-  public DU(File path, Configuration conf) throws IOException {
-    this(path, conf, -1L);
+  public DU(CachingGetSpaceUsed.Builder builder) throws IOException {
+    this(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
  }

-  /**
-   * Keeps track of disk usage.
-   * @param path the path to check disk usage in
-   * @param conf configuration object
-   * @param initialUsed use it until the next refresh.
-   * @throws IOException if we fail to refresh the disk usage
-   */
-  public DU(File path, Configuration conf, long initialUsed)
-      throws IOException {
-    this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
-        CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT), initialUsed);
-  }
-
-  /**
-   * This thread refreshes the "used" variable.
-   *
-   * Future improvements could be to not permanently
-   * run this thread, instead run when getUsed is called.
-   **/
-  class DURefreshThread implements Runnable {
-
-    @Override
-    public void run() {
-
-      while(shouldRun) {
-
-        try {
-          Thread.sleep(refreshInterval);
-
-          try {
-            //update the used variable
-            DU.this.run();
-          } catch (IOException e) {
-            synchronized (DU.this) {
-              //save the latest exception so we can return it in getUsed()
-              duException = e;
-            }
-
-            LOG.warn("Could not get disk usage information", e);
-          }
-        } catch (InterruptedException e) {
-        }
-      }
-    }
-  }
-
-  /**
-   * Decrease how much disk space we use.
-   * @param value decrease by this value
-   */
-  public void decDfsUsed(long value) {
-    used.addAndGet(-value);
-  }
-
-  /**
-   * Increase how much disk space we use.
-   * @param value increase by this value
-   */
-  public void incDfsUsed(long value) {
-    used.addAndGet(value);
-  }
-
-  /**
-   * @return disk space used
-   * @throws IOException if the shell command fails
-   */
-  public long getUsed() throws IOException {
-    //if the updating thread isn't started, update on demand
-    if(refreshUsed == null) {
-      run();
-    } else {
-      synchronized (DU.this) {
-        //if an exception was thrown in the last run, rethrow
-        if(duException != null) {
-          IOException tmp = duException;
-          duException = null;
-          throw tmp;
-        }
-      }
-    }
-
-    return Math.max(used.longValue(), 0L);
-  }
-
-  /**
-   * @return the path of which we're keeping track of disk usage
-   */
-  public String getDirPath() {
-    return dirPath;
-  }
-
-  /**
-   * Override to hook in DUHelper class. Maybe this can be used more
-   * generally as well on Unix/Linux based systems
-   */
-  @Override
-  protected void run() throws IOException {
-    if (WINDOWS) {
-      used.set(DUHelper.getFolderUsage(dirPath));
-      return;
-    }
-    super.run();
-  }
-
-  /**
-   * Start the disk usage checking thread.
-   */
-  public void start() {
-    //only start the thread if the interval is sane
-    if(refreshInterval > 0) {
-      refreshUsed = new Thread(new DURefreshThread(),
-          "refreshUsed-"+dirPath);
-      refreshUsed.setDaemon(true);
-      refreshUsed.start();
-    }
-  }
-
-  /**
-   * Shut down the refreshing thread.
-   */
-  public void shutdown() {
-    this.shouldRun = false;
-
-    if(this.refreshUsed != null) {
-      this.refreshUsed.interrupt();
-    }
-  }
-
-  @Override
-  public String toString() {
-    return
-      "du -sk " + dirPath +"\n" +
-      used + "\t" + dirPath;
-  }
-
-  @Override
-  protected String[] getExecString() {
-    return new String[] {"du", "-sk", dirPath};
-  }
+  @Override
+  protected synchronized void refresh() {
+    if (duShell == null) {
+      duShell = new DUShell();
+    }
+    try {
+      duShell.startRefresh();
+    } catch (IOException ioe) {
+      LOG.warn("Could not get disk usage information", ioe);
+    }
+  }
+
+  private final class DUShell extends Shell {
+    void startRefresh() throws IOException {
+      run();
+    }
+
+    @Override
+    public String toString() {
+      return
+          "du -sk " + getDirPath() + "\n" + used.get() + "\t" + getDirPath();
+    }
+
+    @Override
+    protected String[] getExecString() {
+      return new String[]{"du", "-sk", getDirPath()};
+    }

  @Override
@@ -233,18 +76,25 @@ public class DU extends Shell {
      throw new IOException("Expecting a line not the end of stream");
    }
    String[] tokens = line.split("\t");
-    if(tokens.length == 0) {
+    if (tokens.length == 0) {
      throw new IOException("Illegal du output");
    }
-    this.used.set(Long.parseLong(tokens[0])*1024);
+    setUsed(Long.parseLong(tokens[0]) * 1024);
  }

+  }
+
  public static void main(String[] args) throws Exception {
    String path = ".";
    if (args.length > 0) {
      path = args[0];
    }

-    System.out.println(new DU(new File(path), new Configuration()).toString());
+    GetSpaceUsed du = new GetSpaceUsed.Builder().setPath(new File(path))
+        .setConf(new Configuration())
+        .build();
+    String duResult = du.toString();
+    System.out.println(duResult);
  }
}
@@ -0,0 +1,147 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;

public interface GetSpaceUsed {
  long getUsed() throws IOException;

  /**
   * The builder class.
   */
  final class Builder {
    static final Logger LOG = LoggerFactory.getLogger(Builder.class);

    static final String CLASSNAME_KEY = "fs.getspaceused.classname";

    private Configuration conf;
    private Class<? extends GetSpaceUsed> klass = null;
    private File path = null;
    private Long interval = null;
    private Long initialUsed = null;

    public Configuration getConf() {
      return conf;
    }

    public Builder setConf(Configuration conf) {
      this.conf = conf;
      return this;
    }

    public long getInterval() {
      if (interval != null) {
        return interval;
      }
      long result = CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT;
      if (conf == null) {
        return result;
      }
      return conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, result);
    }

    public Builder setInterval(long interval) {
      this.interval = interval;
      return this;
    }

    public Class<? extends GetSpaceUsed> getKlass() {
      if (klass != null) {
        return klass;
      }
      Class<? extends GetSpaceUsed> result = null;
      if (Shell.WINDOWS) {
        result = WindowsGetSpaceUsed.class;
      } else {
        result = DU.class;
      }
      if (conf == null) {
        return result;
      }
      return conf.getClass(CLASSNAME_KEY, result, GetSpaceUsed.class);
    }

    public Builder setKlass(Class<? extends GetSpaceUsed> klass) {
      this.klass = klass;
      return this;
    }

    public File getPath() {
      return path;
    }

    public Builder setPath(File path) {
      this.path = path;
      return this;
    }

    public long getInitialUsed() {
      if (initialUsed == null) {
        return -1;
      }
      return initialUsed;
    }

    public Builder setInitialUsed(long initialUsed) {
      this.initialUsed = initialUsed;
      return this;
    }

    public GetSpaceUsed build() throws IOException {
      GetSpaceUsed getSpaceUsed = null;
      try {
        Constructor<? extends GetSpaceUsed> cons =
            getKlass().getConstructor(Builder.class);
        getSpaceUsed = cons.newInstance(this);
      } catch (InstantiationException e) {
        LOG.warn("Error trying to create an instance of " + getKlass(), e);
      } catch (IllegalAccessException e) {
        LOG.warn("Error trying to create " + getKlass(), e);
      } catch (InvocationTargetException e) {
        LOG.warn("Error trying to create " + getKlass(), e);
      } catch (NoSuchMethodException e) {
        LOG.warn("Doesn't look like the class " + getKlass() +
            " have the needed constructor", e);
      }
      // If there were any exceptions then getSpaceUsed will be null.
      // Construct our best-guess fallback.
      if (getSpaceUsed == null) {
        if (Shell.WINDOWS) {
          getSpaceUsed = new WindowsGetSpaceUsed(this);
        } else {
          getSpaceUsed = new DU(this);
        }
      }
      // Call init after the constructor has finished.
      if (getSpaceUsed instanceof CachingGetSpaceUsed) {
        ((CachingGetSpaceUsed) getSpaceUsed).init();
      }
      return getSpaceUsed;
    }
  }
}
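A minimal usage sketch of the builder above, under the same assumptions as the earlier FreeSpaceGetSpaceUsed example; everything else here is from this commit. Without setKlass() or a fs.getspaceused.classname override, build() falls back to DU (WindowsGetSpaceUsed on Windows).

package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;

import java.io.File;
import java.io.IOException;

public class GetSpaceUsedExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Optional: plug in a custom implementation by name. The class must
    // expose the (Builder) constructor that build() reflects on.
    // conf.set("fs.getspaceused.classname",
    //     FreeSpaceGetSpaceUsed.class.getName());

    GetSpaceUsed spaceUsed = new GetSpaceUsed.Builder()
        .setPath(new File("."))
        .setConf(conf)
        .build();
    System.out.println("Estimated bytes used: " + spaceUsed.getUsed());

    // Caching implementations own a refresh thread; close them when done.
    if (spaceUsed instanceof CachingGetSpaceUsed) {
      ((CachingGetSpaceUsed) spaceUsed).close();
    }
  }
}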
@@ -0,0 +1,46 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import java.io.IOException;

/**
 * Class to tell the size of a path on Windows.
 * Rather than shelling out, on Windows this uses DUHelper.getFolderUsage.
 */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class WindowsGetSpaceUsed extends CachingGetSpaceUsed {

  WindowsGetSpaceUsed(CachingGetSpaceUsed.Builder builder) throws IOException {
    super(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
  }

  /**
   * Override to hook in the DUHelper class.
   */
  @Override
  protected void refresh() {
    used.set(DUHelper.getFolderUsage(getDirPath()));
  }
}
@@ -79,27 +79,28 @@ public class TestDU extends TestCase {

    Thread.sleep(5000); // let the metadata updater catch up

-    DU du = new DU(file, 10000);
-    du.start();
+    DU du = new DU(file, 10000, -1);
+    du.init();
    long duSize = du.getUsed();
-    du.shutdown();
+    du.close();

    assertTrue("Invalid on-disk size",
        duSize >= writtenSize &&
        writtenSize <= (duSize + slack));

    //test with 0 interval, will not launch thread
-    du = new DU(file, 0);
-    du.start();
+    du = new DU(file, 0, -1);
+    du.init();
    duSize = du.getUsed();
-    du.shutdown();
+    du.close();

    assertTrue("Invalid on-disk size",
        duSize >= writtenSize &&
        writtenSize <= (duSize + slack));

    //test without launching thread
-    du = new DU(file, 10000);
+    du = new DU(file, 10000, -1);
+    du.init();
    duSize = du.getUsed();

    assertTrue("Invalid on-disk size",
@@ -111,8 +112,8 @@ public class TestDU extends TestCase {
    assertTrue(file.createNewFile());
-    Configuration conf = new Configuration();
-    conf.setLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY, 10000L);
-    DU du = new DU(file, conf);
-    du.decDfsUsed(Long.MAX_VALUE);
+    DU du = new DU(file, 10000L, -1);
+    du.incDfsUsed(-Long.MAX_VALUE);
    long duSize = du.getUsed();
    assertTrue(String.valueOf(duSize), duSize >= 0L);
  }
@@ -121,7 +122,7 @@ public class TestDU extends TestCase {
    File file = new File(DU_DIR, "dataX");
    createFile(file, 8192);
    DU du = new DU(file, 3000, 1024);
-    du.start();
+    du.init();
    assertTrue("Initial usage setting not honored", du.getUsed() == 1024);

    // wait until the first du runs.
@@ -131,4 +132,7 @@ public class TestDU extends TestCase {

    assertTrue("Usage didn't get updated", du.getUsed() == 8192);
  }
+
+
+
}
@@ -0,0 +1,133 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.File;
import java.io.IOException;

import static org.junit.Assert.*;

public class TestGetSpaceUsed {
  final static private File DIR = new File(
      System.getProperty("test.build.data", "/tmp"), "TestGetSpaceUsed");

  @Before
  public void setUp() {
    FileUtil.fullyDelete(DIR);
    assertTrue(DIR.mkdirs());
  }

  @After
  public void tearDown() throws IOException {
    FileUtil.fullyDelete(DIR);
  }

  /**
   * Test that the builder can create the class specified in the configuration.
   */
  @Test
  public void testBuilderConf() throws Exception {
    File file = new File(DIR, "testBuilderConf");
    assertTrue(file.createNewFile());
    Configuration conf = new Configuration();
    conf.set("fs.getspaceused.classname", DummyDU.class.getName());
    CachingGetSpaceUsed instance =
        (CachingGetSpaceUsed) new CachingGetSpaceUsed.Builder()
            .setPath(file)
            .setInterval(0)
            .setConf(conf)
            .build();
    assertNotNull(instance);
    assertTrue(instance instanceof DummyDU);
    assertFalse(instance.running());
    instance.close();
  }

  @Test
  public void testBuildInitial() throws Exception {
    File file = new File(DIR, "testBuildInitial");
    assertTrue(file.createNewFile());
    CachingGetSpaceUsed instance =
        (CachingGetSpaceUsed) new CachingGetSpaceUsed.Builder()
            .setPath(file)
            .setInitialUsed(90210)
            .setKlass(DummyDU.class)
            .build();
    assertEquals(90210, instance.getUsed());
    instance.close();
  }

  @Test
  public void testBuildInterval() throws Exception {
    File file = new File(DIR, "testBuildInitial");
    assertTrue(file.createNewFile());
    CachingGetSpaceUsed instance =
        (CachingGetSpaceUsed) new CachingGetSpaceUsed.Builder()
            .setPath(file)
            .setInitialUsed(90210)
            .setInterval(50060)
            .setKlass(DummyDU.class)
            .build();
    assertEquals(50060, instance.getRefreshInterval());
    instance.close();
  }

  @Test
  public void testBuildNonCaching() throws Exception {
    File file = new File(DIR, "testBuildNonCaching");
    assertTrue(file.createNewFile());
    GetSpaceUsed instance = new CachingGetSpaceUsed.Builder()
        .setPath(file)
        .setInitialUsed(90210)
        .setInterval(50060)
        .setKlass(DummyGetSpaceUsed.class)
        .build();
    assertEquals(300, instance.getUsed());
    assertTrue(instance instanceof DummyGetSpaceUsed);
  }

  private static class DummyDU extends CachingGetSpaceUsed {

    public DummyDU(Builder builder) throws IOException {
      // Push to the base class.
      // Most times that's all that will need to be done.
      super(builder);
    }

    @Override
    protected void refresh() {
      // This is a test so don't du anything.
    }
  }

  private static class DummyGetSpaceUsed implements GetSpaceUsed {

    public DummyGetSpaceUsed(GetSpaceUsed.Builder builder) {
    }

    @Override public long getUsed() throws IOException {
      return 300;
    }
  }
}
@@ -36,8 +36,9 @@ import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DU;
+import org.apache.hadoop.fs.CachingGetSpaceUsed;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.GetSpaceUsed;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -89,7 +90,7 @@ class BlockPoolSlice {
  private AtomicLong numOfBlocks = new AtomicLong();

  // TODO:FEDERATION scalability issue - a thread per DU is needed
-  private final DU dfsUsage;
+  private final GetSpaceUsed dfsUsage;

  /**
   * Create a block pool slice
@@ -147,8 +148,10 @@
    }
    // Use cached value initially if available. Or the following call will
    // block until the initial du command completes.
-    this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
-    this.dfsUsage.start();
+    this.dfsUsage = new CachingGetSpaceUsed.Builder().setPath(bpDir)
+        .setConf(conf)
+        .setInitialUsed(loadDfsUsed())
+        .build();

    // Make the dfs usage to be saved during shutdown.
    ShutdownHookManager.get().addShutdownHook(
@@ -184,7 +187,9 @@

  /** Run DU on local drives. It must be synchronized from caller. */
  void decDfsUsed(long value) {
-    dfsUsage.decDfsUsed(value);
+    if (dfsUsage instanceof CachingGetSpaceUsed) {
+      ((CachingGetSpaceUsed)dfsUsage).incDfsUsed(-value);
+    }
  }

  long getDfsUsed() throws IOException {
@@ -192,14 +197,18 @@
  }

  void incDfsUsed(long value) {
-    dfsUsage.incDfsUsed(value);
+    if (dfsUsage instanceof CachingGetSpaceUsed) {
+      ((CachingGetSpaceUsed)dfsUsage).incDfsUsed(value);
+    }
  }

  /**
-   * Read in the cached DU value and return it if it is less than 600 seconds
-   * old (DU update interval). Slight imprecision of dfsUsed is not critical
-   * and skipping DU can significantly shorten the startup time.
-   * If the cached value is not available or too old, -1 is returned.
+   * Read in the cached DU value and return it if it is less than
+   * cachedDfsUsedCheckTime which is set by the
+   * dfs.datanode.cached-dfsused.check.interval.ms parameter. Slight
+   * imprecision of dfsUsed is not critical and skipping DU can significantly
+   * shorten the startup time. If the cached value is not available or too
+   * old, -1 is returned.
   */
  long loadDfsUsed() {
    long cachedDfsUsed;
@@ -298,7 +307,10 @@
    }
    File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
    File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
-    dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
+    if (dfsUsage instanceof CachingGetSpaceUsed) {
+      ((CachingGetSpaceUsed) dfsUsage).incDfsUsed(
+          b.getNumBytes() + metaFile.length());
+    }
    return blockFile;
  }

@@ -714,7 +726,10 @@
      saveReplicas(blocksListToPersist);
      saveDfsUsed();
      dfsUsedSaved = true;
-      dfsUsage.shutdown();
+
+      if (dfsUsage instanceof CachingGetSpaceUsed) {
+        IOUtils.cleanup(LOG, ((CachingGetSpaceUsed) dfsUsage));
+      }
    }

  private boolean readReplicasFromCache(ReplicaMap volumeMap,
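The instanceof guard above repeats at each BlockPoolSlice call site that adjusts the cached value: only caching implementations can accept deltas, while other implementations recompute usage on their own schedule and ignore hints. A small helper capturing that pattern — hypothetical, not part of this commit — might look like this:

import org.apache.hadoop.fs.CachingGetSpaceUsed;
import org.apache.hadoop.fs.GetSpaceUsed;

// Hypothetical refactoring sketch: apply a delta only when the
// implementation caches its value; otherwise the hint is dropped.
final class SpaceUsedDeltas {
  private SpaceUsedDeltas() {
  }

  static void addDelta(GetSpaceUsed spaceUsed, long delta) {
    if (spaceUsed instanceof CachingGetSpaceUsed) {
      ((CachingGetSpaceUsed) spaceUsed).incDfsUsed(delta);
    }
  }
}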