HDDS-169: Add Volume IO Stats. Contributed by Bharat Viswanadham

Bharat Viswanadham 2018-06-28 17:10:10 -07:00
parent 84ac6bb1b1
commit 23ebe0f4f0
8 changed files with 308 additions and 54 deletions

View File

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
     ContainerType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
     ContainerLifeCycleState;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import java.io.IOException;
 import java.util.Collections;
@@ -59,6 +60,8 @@ public class ContainerData {
   private final AtomicLong writeCount;
   private final AtomicLong bytesUsed;
 
+  private HddsVolume volume;
+
   /**
    * Creates a ContainerData Object, which holds metadata of the container.
@@ -289,5 +292,22 @@ public class ContainerData {
     return this.bytesUsed.addAndGet(-1L * reclaimed);
   }
 
+  /**
+   * Set the Volume for the Container.
+   * This should be called only from createContainer.
+   * @param hddsVolume
+   */
+  public void setVolume(HddsVolume hddsVolume) {
+    this.volume = hddsVolume;
+  }
+
+  /**
+   * Returns the volume of the Container.
+   * @return HddsVolume
+   */
+  public HddsVolume getVolume() {
+    return volume;
+  }
 }

View File

@@ -52,6 +52,7 @@ public final class HddsVolume {
   private final File hddsRootDir;
   private final VolumeInfo volumeInfo;
   private VolumeState state;
+  private final VolumeIOStats volumeIOStats;
 
   // VERSION file properties
   private String storageID;       // id of the file system
@@ -117,6 +118,7 @@ public final class HddsVolume {
     this.state = VolumeState.NOT_INITIALIZED;
     this.clusterID = b.clusterID;
     this.datanodeUuid = b.datanodeUuid;
+    this.volumeIOStats = new VolumeIOStats();
 
     VolumeInfo.Builder volumeBuilder =
         new VolumeInfo.Builder(b.volumeRootStr, b.conf)
@@ -303,6 +305,10 @@ public final class HddsVolume {
     return (state == VolumeState.FAILED);
   }
 
+  public VolumeIOStats getVolumeIOStats() {
+    return volumeIOStats;
+  }
+
   public void failVolume() {
     setState(VolumeState.FAILED);
     volumeInfo.shutdownUsageThread();

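Each HddsVolume now owns a single VolumeIOStats instance, created unconditionally in the builder path and exposed through getVolumeIOStats(). As a rough sketch of how a caller could reach these counters (the root directory below is a placeholder, and the Builder chain is the same one the new test uses):

import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;

public class HddsVolumeStatsDemo {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // "/tmp/hdds-demo" is an illustrative volume root, not from this commit.
    HddsVolume volume = new HddsVolume.Builder("/tmp/hdds-demo")
        .conf(conf)
        .datanodeUuid(UUID.randomUUID().toString())
        .build();

    // A freshly built volume starts with zeroed counters.
    VolumeIOStats stats = volume.getVolumeIOStats();
    System.out.println("readBytes=" + stats.getReadBytes()
        + " writeBytes=" + stats.getWriteBytes());
  }
}
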
View File

@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.volume;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * This class is used to track Volume IO stats for each HDDS Volume.
+ */
+public class VolumeIOStats {
+
+  private final AtomicLong readBytes;
+  private final AtomicLong readOpCount;
+  private final AtomicLong writeBytes;
+  private final AtomicLong writeOpCount;
+  private final AtomicLong readTime;
+  private final AtomicLong writeTime;
+
+  public VolumeIOStats() {
+    readBytes = new AtomicLong(0);
+    readOpCount = new AtomicLong(0);
+    writeBytes = new AtomicLong(0);
+    writeOpCount = new AtomicLong(0);
+    readTime = new AtomicLong(0);
+    writeTime = new AtomicLong(0);
+  }
+
+  /**
+   * Increment the number of bytes read from the volume.
+   * @param bytesRead
+   */
+  public void incReadBytes(long bytesRead) {
+    readBytes.addAndGet(bytesRead);
+  }
+
+  /**
+   * Increment the number of read operations performed on the volume.
+   */
+  public void incReadOpCount() {
+    readOpCount.incrementAndGet();
+  }
+
+  /**
+   * Increment the number of bytes written to the volume.
+   * @param bytesWritten
+   */
+  public void incWriteBytes(long bytesWritten) {
+    writeBytes.addAndGet(bytesWritten);
+  }
+
+  /**
+   * Increment the number of write operations performed on the volume.
+   */
+  public void incWriteOpCount() {
+    writeOpCount.incrementAndGet();
+  }
+
+  /**
+   * Increment the time taken by read operations on the volume.
+   * @param time
+   */
+  public void incReadTime(long time) {
+    readTime.addAndGet(time);
+  }
+
+  /**
+   * Increment the time taken by write operations on the volume.
+   * @param time
+   */
+  public void incWriteTime(long time) {
+    writeTime.addAndGet(time);
+  }
+
+  /**
+   * Returns the total number of bytes read from the volume.
+   * @return long
+   */
+  public long getReadBytes() {
+    return readBytes.get();
+  }
+
+  /**
+   * Returns the total number of bytes written to the volume.
+   * @return long
+   */
+  public long getWriteBytes() {
+    return writeBytes.get();
+  }
+
+  /**
+   * Returns the total number of read operations performed on the volume.
+   * @return long
+   */
+  public long getReadOpCount() {
+    return readOpCount.get();
+  }
+
+  /**
+   * Returns the total number of write operations performed on the volume.
+   * @return long
+   */
+  public long getWriteOpCount() {
+    return writeOpCount.get();
+  }
+
+  /**
+   * Returns the total time spent in read operations on the volume.
+   * @return long
+   */
+  public long getReadTime() {
+    return readTime.get();
+  }
+
+  /**
+   * Returns the total time spent in write operations on the volume.
+   * @return long
+   */
+  public long getWriteTime() {
+    return writeTime.get();
+  }
+
+}

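VolumeIOStats is a plain holder of AtomicLong counters: it performs no timing or I/O itself, so each caller measures the operation and pushes the deltas in afterwards. A minimal sketch of that calling pattern, assuming only this class and org.apache.hadoop.util.Time (the 4 KB payload and the elided disk write are illustrative):

import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
import org.apache.hadoop.util.Time;

public class VolumeIOStatsUsage {
  public static void main(String[] args) {
    VolumeIOStats stats = new VolumeIOStats();
    byte[] payload = new byte[4096];       // hypothetical data to write

    long start = Time.monotonicNow();
    // ... perform the actual disk write of the payload here ...
    stats.incWriteTime(Time.monotonicNow() - start);
    stats.incWriteOpCount();
    stats.incWriteBytes(payload.length);

    System.out.println("writeBytes=" + stats.getWriteBytes()
        + " writeOps=" + stats.getWriteOpCount()
        + " writeTimeMs=" + stats.getWriteTime());
  }
}
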
View File

@@ -145,6 +145,7 @@ public class KeyValueContainer implements Container {
       containerData.setChunksPath(chunksPath.getPath());
       containerData.setContainerDBType(impl);
       containerData.setDbFile(dbFile);
+      containerData.setVolume(containerVolume);
 
       // Create .container file and .chksm file
       createContainerFile(containerFile, containerCheckSumFile);

View File

@@ -36,6 +36,8 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -67,10 +69,11 @@ public final class ChunkUtils {
    * @param chunkFile - File to write data to.
    * @param chunkInfo - Data stream to write.
    * @param data - The data buffer.
+   * @param volumeIOStats
    * @throws StorageContainerException
    */
   public static void writeData(File chunkFile, ChunkInfo chunkInfo,
-      byte[] data) throws
+      byte[] data, VolumeIOStats volumeIOStats) throws
       StorageContainerException, ExecutionException, InterruptedException,
       NoSuchAlgorithmException {
@@ -87,6 +90,12 @@ public final class ChunkUtils {
     FileLock lock = null;
     try {
+      if (chunkInfo.getChecksum() != null &&
+          !chunkInfo.getChecksum().isEmpty()) {
+        verifyChecksum(chunkInfo, data, log);
+      }
+
+      long writeTimeStart = Time.monotonicNow();
       file =
           AsynchronousFileChannel.open(chunkFile.toPath(),
               StandardOpenOption.CREATE,
@@ -94,11 +103,11 @@ public final class ChunkUtils {
               StandardOpenOption.SPARSE,
               StandardOpenOption.SYNC);
       lock = file.lock().get();
-      if (chunkInfo.getChecksum() != null &&
-          !chunkInfo.getChecksum().isEmpty()) {
-        verifyChecksum(chunkInfo, data, log);
-      }
       int size = file.write(ByteBuffer.wrap(data), chunkInfo.getOffset()).get();
+      // Increment volumeIO stats here.
+      volumeIOStats.incWriteTime(Time.monotonicNow() - writeTimeStart);
+      volumeIOStats.incWriteOpCount();
+      volumeIOStats.incWriteBytes(size);
       if (size != data.length) {
         log.error("Invalid write size found. Size:{} Expected: {} ", size,
             data.length);
@@ -136,12 +145,15 @@ public final class ChunkUtils {
    *
    * @param chunkFile - file where data lives.
    * @param data - chunk definition.
+   * @param volumeIOStats
    * @return ByteBuffer
    * @throws StorageContainerException
    * @throws ExecutionException
    * @throws InterruptedException
    */
-  public static ByteBuffer readData(File chunkFile, ChunkInfo data) throws
+  public static ByteBuffer readData(File chunkFile, ChunkInfo data,
+      VolumeIOStats volumeIOStats)
+      throws
       StorageContainerException, ExecutionException, InterruptedException,
       NoSuchAlgorithmException {
     Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
@@ -157,6 +169,7 @@ public final class ChunkUtils {
     AsynchronousFileChannel file = null;
     FileLock lock = null;
     try {
+      long readStartTime = Time.monotonicNow();
       file =
           AsynchronousFileChannel.open(chunkFile.toPath(),
               StandardOpenOption.READ);
@@ -165,10 +178,13 @@ public final class ChunkUtils {
       ByteBuffer buf = ByteBuffer.allocate((int) data.getLen());
       file.read(buf, data.getOffset()).get();
+      // Increment volumeIO stats here.
+      volumeIOStats.incReadTime(Time.monotonicNow() - readStartTime);
+      volumeIOStats.incReadOpCount();
+      volumeIOStats.incReadBytes(data.getLen());
       if (data.getChecksum() != null && !data.getChecksum().isEmpty()) {
         verifyChecksum(data, buf.array(), log);
       }
       return buf;
     } catch (IOException e) {
       throw new StorageContainerException(e, IO_EXCEPTION);

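Both instrumented paths follow the same shape: take a monotonic timestamp, perform the locked channel I/O, then record the elapsed time, one operation, and the byte count. A self-contained sketch of the write side using only JDK classes (the file path is a placeholder, and System.nanoTime() stands in for Hadoop's Time.monotonicNow()):

import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.channels.FileLock;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class TimedChunkWrite {
  public static void main(String[] args) throws Exception {
    byte[] data = "example chunk payload".getBytes();
    long start = System.nanoTime() / 1_000_000;  // ms, like Time.monotonicNow()

    AsynchronousFileChannel file = AsynchronousFileChannel.open(
        Paths.get("/tmp/example.chunk"),         // hypothetical chunk file
        StandardOpenOption.CREATE, StandardOpenOption.WRITE,
        StandardOpenOption.SPARSE, StandardOpenOption.SYNC);
    FileLock lock = file.lock().get();           // same locking as writeData()
    try {
      int size = file.write(ByteBuffer.wrap(data), 0).get();
      long elapsedMs = System.nanoTime() / 1_000_000 - start;
      // These three values are what feed incWriteTime/incWriteOpCount/incWriteBytes.
      System.out.println("wrote " + size + " bytes in " + elapsedMs + " ms");
    } finally {
      lock.release();
      file.close();
    }
  }
}
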
View File

@@ -26,6 +26,8 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
 import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
@@ -64,13 +66,15 @@ public class ChunkManagerImpl implements ChunkManager {
    * @throws StorageContainerException
    */
   public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
       byte[] data, ContainerProtos.Stage stage)
       throws StorageContainerException {
     try {
       KeyValueContainerData containerData = (KeyValueContainerData) container
           .getContainerData();
+      HddsVolume volume = containerData.getVolume();
+      VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
       File chunkFile = ChunkUtils.validateChunk(containerData, info);
       File tmpChunkFile = getTmpChunkFile(chunkFile, info);
@@ -81,20 +85,23 @@ public class ChunkManagerImpl implements ChunkManager {
       switch (stage) {
       case WRITE_DATA:
         // Initially writes to temporary chunk file.
-        ChunkUtils.writeData(tmpChunkFile, info, data);
+        ChunkUtils.writeData(tmpChunkFile, info, data, volumeIOStats);
+        // No need to increment container stats here, as the data is not
+        // committed yet.
         break;
       case COMMIT_DATA:
         // commit the data, means move chunk data from temporary chunk file
         // to actual chunk file.
-        long sizeDiff = tmpChunkFile.length() - chunkFile.length();
         commitChunk(tmpChunkFile, chunkFile);
-        containerData.incrBytesUsed(sizeDiff);
+        // Increment container stats here, as we commit the data.
+        containerData.incrBytesUsed(info.getLen());
         containerData.incrWriteCount();
-        containerData.incrWriteBytes(sizeDiff);
+        containerData.incrWriteBytes(info.getLen());
         break;
       case COMBINED:
         // directly write to the chunk file
-        ChunkUtils.writeData(chunkFile, info, data);
+        ChunkUtils.writeData(chunkFile, info, data, volumeIOStats);
+        // Increment container stats here, as we directly write to chunk file.
         containerData.incrBytesUsed(info.getLen());
         containerData.incrWriteCount();
         containerData.incrWriteBytes(info.getLen());
@@ -137,6 +144,8 @@ public class ChunkManagerImpl implements ChunkManager {
       KeyValueContainerData containerData = (KeyValueContainerData) container
           .getContainerData();
       ByteBuffer data;
+      HddsVolume volume = containerData.getVolume();
+      VolumeIOStats volumeIOStats = volume.getVolumeIOStats();
 
       // Checking here, which layout version the container is, and reading
       // the chunk file in that format.
@@ -145,9 +154,10 @@ public class ChunkManagerImpl implements ChunkManager {
       if (containerData.getLayOutVersion() == ChunkLayOutVersion
           .getLatestVersion().getVersion()) {
         File chunkFile = ChunkUtils.getChunkFile(containerData, info);
-        data = ChunkUtils.readData(chunkFile, info);
+        data = ChunkUtils.readData(chunkFile, info, volumeIOStats);
         containerData.incrReadCount();
-        containerData.incrReadBytes(chunkFile.length());
+        long length = chunkFile.length();
+        containerData.incrReadBytes(length);
         return data.array();
       }
     } catch(NoSuchAlgorithmException ex) {

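Since KeyValueContainer.create() now records the chosen volume on the ContainerData, ChunkManagerImpl can walk from any container to its volume's counters, as both hunks above do. A hypothetical reporting helper built on that same getVolume()/getVolumeIOStats() chain (the class and method names here are illustrative, not part of the commit):

import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;

public final class VolumeStatsReporter {
  private VolumeStatsReporter() { }

  /** Render one container's per-volume IO stats as a single line. */
  public static String report(ContainerData containerData) {
    VolumeIOStats s = containerData.getVolume().getVolumeIOStats();
    return String.format(
        "reads=%d (%d bytes, %d ms), writes=%d (%d bytes, %d ms)",
        s.getReadOpCount(), s.getReadBytes(), s.getReadTime(),
        s.getWriteOpCount(), s.getWriteBytes(), s.getWriteTime());
  }
}
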
View File

@@ -22,10 +22,12 @@ import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -37,13 +39,9 @@ import org.mockito.Mockito;
 import java.io.File;
 import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
 import java.util.UUID;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.mock;
@@ -57,21 +55,21 @@ public class TestChunkManagerImpl {
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
   private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+  private HddsVolume hddsVolume;
   private KeyValueContainerData keyValueContainerData;
   private KeyValueContainer keyValueContainer;
-  private KeyData keyData;
   private BlockID blockID;
   private ChunkManagerImpl chunkManager;
   private ChunkInfo chunkInfo;
   private byte[] data;
 
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
 
   @Before
   public void setUp() throws Exception {
     config = new OzoneConfiguration();
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
+    hddsVolume = new HddsVolume.Builder(folder.getRoot()
         .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
         .toString()).build();
@@ -83,22 +81,15 @@ public class TestChunkManagerImpl {
     keyValueContainerData = new KeyValueContainerData(1L);
 
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, config);
+    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
 
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
     data = "testing write chunks".getBytes();
     // Creating KeyData
     blockID = new BlockID(1L, 1L);
-    keyData = new KeyData(blockID);
-    keyData.addMetadata("VOLUME", "ozone");
-    keyData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
     chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, data.length);
-    chunkList.add(chunkInfo.getProtoBufMessage());
-    keyData.setChunks(chunkList);
 
     // Create a ChunkManager object.
     chunkManager = new ChunkManagerImpl();
@@ -113,16 +104,38 @@ public class TestChunkManagerImpl {
     assertTrue(chunksPath.exists());
     // Initially chunks folder should be empty.
     assertTrue(chunksPath.listFiles().length == 0);
+
+    // As no chunks are written to the volume, writeBytes should be 0.
+    checkWriteIOStats(0, 0);
     chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
         ContainerProtos.Stage.WRITE_DATA);
     // Now a chunk file is being written with Stage WRITE_DATA, so it should
     // create a temporary chunk file.
     assertTrue(chunksPath.listFiles().length == 1);
+
+    File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo);
+    File tempChunkFile = new File(chunkFile.getParent(),
+        chunkFile.getName() +
+            OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER +
+            OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX);
+    // As the chunk write stage is WRITE_DATA, a temp chunk file is created.
+    assertTrue(tempChunkFile.exists());
+
+    checkWriteIOStats(data.length, 1);
+
     chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
         ContainerProtos.Stage.COMMIT_DATA);
+
+    checkWriteIOStats(data.length, 1);
+
     // Old temp file should have been renamed to chunk file.
     assertTrue(chunksPath.listFiles().length == 1);
+
+    // As commit happened, the chunk file should exist and the temp file
+    // should be gone.
+    assertTrue(chunkFile.exists());
+    assertFalse(tempChunkFile.exists());
   }
 
   @Test
@@ -131,13 +144,12 @@ public class TestChunkManagerImpl {
       long randomLength = 200L;
       chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
           .getLocalID(), 0), 0, randomLength);
-      List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-      chunkList.add(chunkInfo.getProtoBufMessage());
-      keyData.setChunks(chunkList);
       chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
           ContainerProtos.Stage.WRITE_DATA);
       fail("testWriteChunkIncorrectLength failed");
     } catch (StorageContainerException ex) {
+      // As we got an exception, writeBytes should be 0.
+      checkWriteIOStats(0, 0);
       GenericTestUtils.assertExceptionContains("data array does not match " +
           "the length ", ex);
       assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, ex.getResult());
@@ -152,21 +164,29 @@ public class TestChunkManagerImpl {
     assertTrue(chunksPath.exists());
     // Initially chunks folder should be empty.
     assertTrue(chunksPath.listFiles().length == 0);
+    checkWriteIOStats(0, 0);
     chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
         ContainerProtos.Stage.COMBINED);
-    // Now a chunk file is being written with Stage WRITE_DATA, so it should
-    // create a temporary chunk file.
+    // Now a chunk file is being written with Stage COMBINED, so it should
+    // create a chunk file.
     assertTrue(chunksPath.listFiles().length == 1);
+    File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo);
+    assertTrue(chunkFile.exists());
+    checkWriteIOStats(data.length, 1);
   }
 
   @Test
   public void testReadChunk() throws Exception {
+    checkWriteIOStats(0, 0);
     chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
         ContainerProtos.Stage.COMBINED);
+    checkWriteIOStats(data.length, 1);
+    checkReadIOStats(0, 0);
     byte[] expectedData = chunkManager.readChunk(keyValueContainer, blockID,
         chunkInfo);
     assertEquals(expectedData.length, data.length);
     assertTrue(Arrays.equals(expectedData, data));
+    checkReadIOStats(data.length, 1);
   }
 
   @Test
@@ -187,9 +207,6 @@ public class TestChunkManagerImpl {
       long randomLength = 200L;
       chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
           .getLocalID(), 0), 0, randomLength);
-      List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-      chunkList.add(chunkInfo.getProtoBufMessage());
-      keyData.setChunks(chunkList);
       chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo);
       fail("testDeleteChunkUnsupportedRequest");
     } catch (StorageContainerException ex) {
@@ -205,9 +222,6 @@ public class TestChunkManagerImpl {
         .getLocalID(), 0), 0, data.length);
     //Setting checksum to some value.
     chunkInfo.setChecksum("some garbage");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    chunkList.add(chunkInfo.getProtoBufMessage());
-    keyData.setChunks(chunkList);
     chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
         ContainerProtos.Stage.COMBINED);
     fail("testWriteChunkChecksumMismatch failed");
@@ -231,5 +245,46 @@ public class TestChunkManagerImpl {
     }
   }
 
+  @Test
+  public void testWriteAndReadChunkMultipleTimes() throws Exception {
+    for (int i = 0; i < 100; i++) {
+      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
+          .getLocalID(), i), 0, data.length);
+      chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data,
+          ContainerProtos.Stage.COMBINED);
+    }
+    checkWriteIOStats(data.length * 100, 100);
+    assertTrue(hddsVolume.getVolumeIOStats().getWriteTime() > 0);
+
+    for (int i = 0; i < 100; i++) {
+      chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
+          .getLocalID(), i), 0, data.length);
+      chunkManager.readChunk(keyValueContainer, blockID, chunkInfo);
+    }
+    checkReadIOStats(data.length * 100, 100);
+    assertTrue(hddsVolume.getVolumeIOStats().getReadTime() > 0);
+  }
+
+  /**
+   * Check Write IO stats.
+   * @param length
+   * @param opCount
+   */
+  private void checkWriteIOStats(long length, long opCount) {
+    VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats();
+    assertEquals(length, volumeIOStats.getWriteBytes());
+    assertEquals(opCount, volumeIOStats.getWriteOpCount());
+  }
+
+  /**
+   * Check Read IO stats.
+   * @param length
+   * @param opCount
+   */
+  private void checkReadIOStats(long length, long opCount) {
+    VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats();
+    assertEquals(length, volumeIOStats.getReadBytes());
+    assertEquals(opCount, volumeIOStats.getReadOpCount());
+  }
 }

View File

@@ -58,7 +58,7 @@ public class TestKeyManagerImpl {
   private KeyValueContainerData keyValueContainerData;
   private KeyValueContainer keyValueContainer;
   private KeyData keyData;
-  private KeyManagerImpl keyValueContainerManager;
+  private KeyManagerImpl keyManager;
   private BlockID blockID;
 
   @Rule
@@ -98,17 +98,17 @@ public class TestKeyManagerImpl {
     keyData.setChunks(chunkList);
 
     // Create KeyValueContainerManager
-    keyValueContainerManager = new KeyManagerImpl(config);
+    keyManager = new KeyManagerImpl(config);
   }
 
   @Test
   public void testPutAndGetKey() throws Exception {
     //Put Key
-    keyValueContainerManager.putKey(keyValueContainer, keyData);
+    keyManager.putKey(keyValueContainer, keyData);
     //Get Key
-    KeyData fromGetKeyData = keyValueContainerManager.getKey(keyValueContainer,
+    KeyData fromGetKeyData = keyManager.getKey(keyValueContainer,
         keyData.getBlockID());
 
     assertEquals(keyData.getContainerID(), fromGetKeyData.getContainerID());
@@ -124,9 +124,15 @@ public class TestKeyManagerImpl {
   public void testDeleteKey() throws Exception {
     try {
       //Put Key
-      keyValueContainerManager.putKey(keyValueContainer, keyData);
+      keyManager.putKey(keyValueContainer, keyData);
       //Delete Key
-      keyValueContainerManager.deleteKey(keyValueContainer, blockID);
+      keyManager.deleteKey(keyValueContainer, blockID);
+      try {
+        keyManager.getKey(keyValueContainer, blockID);
+        fail("testDeleteKey");
+      } catch (StorageContainerException ex) {
+        GenericTestUtils.assertExceptionContains("Unable to find the key", ex);
+      }
     } catch (IOException ex) {
       fail("testDeleteKey failed");
     }
@@ -135,8 +141,8 @@ public class TestKeyManagerImpl {
   @Test
   public void testListKey() throws Exception {
     try {
-      keyValueContainerManager.putKey(keyValueContainer, keyData);
-      List<KeyData> listKeyData = keyValueContainerManager.listKey(
+      keyManager.putKey(keyValueContainer, keyData);
+      List<KeyData> listKeyData = keyManager.listKey(
           keyValueContainer, 1, 10);
       assertNotNull(listKeyData);
       assertTrue(listKeyData.size() == 1);
@@ -151,10 +157,10 @@ public class TestKeyManagerImpl {
             .getLocalID(), 0), 0, 1024);
         chunkList.add(info.getProtoBufMessage());
         keyData.setChunks(chunkList);
-        keyValueContainerManager.putKey(keyValueContainer, keyData);
+        keyManager.putKey(keyValueContainer, keyData);
       }
 
-      listKeyData = keyValueContainerManager.listKey(
+      listKeyData = keyManager.listKey(
           keyValueContainer, 1, 10);
       assertNotNull(listKeyData);
       assertTrue(listKeyData.size() == 10);
@@ -167,7 +173,8 @@ public class TestKeyManagerImpl {
   @Test
   public void testGetNoSuchKey() throws Exception {
     try {
-      keyValueContainerManager.getKey(keyValueContainer, new BlockID(1L, 2L));
+      keyData = new KeyData(new BlockID(1L, 2L));
+      keyManager.getKey(keyValueContainer, new BlockID(1L, 2L));
       fail("testGetNoSuchKey failed");
     } catch (StorageContainerException ex) {
       GenericTestUtils.assertExceptionContains("Unable to find the key.", ex);