diff --git a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java
index c89ccad2457..8f1056122bb 100644
--- a/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java
+++ b/extensions-core/hdfs-storage/src/main/java/io/druid/storage/hdfs/HdfsDataSegmentPusher.java
@@ -103,14 +103,14 @@ public class HdfsDataSegmentPusher implements DataSegmentPusher
     final DataSegment dataSegment;
     try (FSDataOutputStream out = fs.create(tmpFile)) {
       size = CompressionUtils.zip(inDir, out);
+      Path outDir = new Path(String.format("%s/%s", config.getStorageDirectory(), storageDir));
       dataSegment = createDescriptorFile(
-          segment.withLoadSpec(makeLoadSpec(tmpFile))
+          segment.withLoadSpec(makeLoadSpec(new Path(String.format("%s/%s", outDir.toUri().getPath(), "index.zip"))))
                  .withSize(size)
                  .withBinaryVersion(SegmentUtils.getVersionFromDir(inDir)),
           tmpFile.getParent(),
           fs
       );
-      Path outDir = new Path(String.format("%s/%s", config.getStorageDirectory(), storageDir));
 
       // Create parent if it does not exist, recreation is not an error
       fs.mkdirs(outDir.getParent());
diff --git a/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java b/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java
index d1890e99a5c..aa19581fb5b 100644
--- a/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java
+++ b/extensions-core/hdfs-storage/src/test/java/io/druid/storage/hdfs/HdfsDataSegmentPusherTest.java
@@ -19,6 +19,7 @@
 
 package io.druid.storage.hdfs;
 
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.io.Files;
@@ -27,14 +28,10 @@ import io.druid.segment.loading.DataSegmentPusherUtil;
 import io.druid.timeline.DataSegment;
 import io.druid.timeline.partition.NoneShardSpec;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
 import org.joda.time.Interval;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.internal.runners.statements.Fail;
 import org.junit.rules.TemporaryFolder;
 
 import java.io.File;
@@ -80,7 +77,16 @@ public class HdfsDataSegmentPusherTest
     DataSegment segment = pusher.push(segmentDir, segmentToPush);
 
     Assert.assertEquals(segmentToPush.getSize(), segment.getSize());
-
+    Assert.assertEquals(segmentToPush, segment);
+    Assert.assertEquals(ImmutableMap.of(
+        "type",
+        "hdfs",
+        "path",
+        String.format("%s/%s/index.zip",
+                      config.getStorageDirectory(),
+                      DataSegmentPusherUtil.getHdfsStorageDir(segmentToPush)
+        )
+    ), segment.getLoadSpec());
     // rename directory after push
     final String storageDir = DataSegmentPusherUtil.getHdfsStorageDir(segment);
     File indexFile = new File(String.format("%s/%s/index.zip", config.getStorageDirectory(), storageDir));
@@ -93,7 +99,8 @@ public class HdfsDataSegmentPusherTest
     outDir.setReadOnly();
     try {
       pusher.push(segmentDir, segmentToPush);
-    }catch (IOException e){
+    }
+    catch (IOException e) {
       Assert.fail("should not throw exception");
     }
   }
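Note (not part of the patch): the new assertion pins down the behavior this change fixes — after push(), the segment's load spec must point at the final index.zip under the configured storage directory, not at the temporary file the segment was zipped into. Below is a minimal, self-contained sketch of that contract; the storageDirectory and storageDir values are illustrative stand-ins for config.getStorageDirectory() and DataSegmentPusherUtil.getHdfsStorageDir(segment), and the class name is hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;

public class LoadSpecSketch
{
  public static void main(String[] args)
  {
    // Illustrative stand-ins; in Druid these come from
    // config.getStorageDirectory() and DataSegmentPusherUtil.getHdfsStorageDir(segment).
    String storageDirectory = "/druid/segments";
    String storageDir = "dataSource/interval/version/partitionNum";

    // After the fix, "path" is the final HDFS location of index.zip,
    // not the temporary push file.
    Map<String, Object> loadSpec = new LinkedHashMap<>();
    loadSpec.put("type", "hdfs");
    loadSpec.put("path", String.format("%s/%s/index.zip", storageDirectory, storageDir));

    System.out.println(loadSpec);
    // {type=hdfs, path=/druid/segments/dataSource/interval/version/partitionNum/index.zip}
  }
}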