mirror of https://github.com/apache/druid.git

fix datasegment metadata (#3555)

This commit is contained in:
parent 078de4fcf9
commit c255dd8b19
@@ -103,14 +103,14 @@ public class HdfsDataSegmentPusher implements DataSegmentPusher
     final DataSegment dataSegment;
     try (FSDataOutputStream out = fs.create(tmpFile)) {
       size = CompressionUtils.zip(inDir, out);
+      Path outDir = new Path(String.format("%s/%s", config.getStorageDirectory(), storageDir));
       dataSegment = createDescriptorFile(
-          segment.withLoadSpec(makeLoadSpec(tmpFile))
+          segment.withLoadSpec(makeLoadSpec(new Path(String.format("%s/%s", outDir.toUri().getPath(), "index.zip"))))
                  .withSize(size)
                  .withBinaryVersion(SegmentUtils.getVersionFromDir(inDir)),
           tmpFile.getParent(),
           fs
       );
-      Path outDir = new Path(String.format("%s/%s", config.getStorageDirectory(), storageDir));

       // Create parent if it does not exist, recreation is not an error
       fs.mkdirs(outDir.getParent());
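Editor's note on the fix (sketch, not code from the commit): previously the descriptor's load spec was built from tmpFile, the temporary zip created during the push, so segment metadata pointed at a staging path. Computing outDir before createDescriptorFile lets makeLoadSpec record the permanent index.zip location instead. A minimal illustration of the resulting map shape, matching the assertion added in HdfsDataSegmentPusherTest below; makeHdfsLoadSpec and both paths are hypothetical stand-ins:

    import com.google.common.collect.ImmutableMap;

    import java.util.Map;

    class LoadSpecSketch
    {
      // Hypothetical stand-in for HdfsDataSegmentPusher.makeLoadSpec(Path):
      // an "hdfs" load spec is a map of type and path.
      static Map<String, Object> makeHdfsLoadSpec(String path)
      {
        return ImmutableMap.<String, Object>of("type", "hdfs", "path", path);
      }

      public static void main(String[] args)
      {
        // Before the fix: a temporary staging file leaked into metadata.
        System.out.println(makeHdfsLoadSpec("/tmp/druid-push-staging/index.zip"));
        // After the fix: the final location under the storage directory.
        System.out.println(makeHdfsLoadSpec("/druid/storage/dataSource/interval/version/0/index.zip"));
      }
    }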
@@ -19,6 +19,7 @@

 package io.druid.storage.hdfs;

+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.io.Files;
@@ -27,14 +28,10 @@ import io.druid.segment.loading.DataSegmentPusherUtil;
 import io.druid.timeline.DataSegment;
 import io.druid.timeline.partition.NoneShardSpec;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
 import org.joda.time.Interval;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.internal.runners.statements.Fail;
 import org.junit.rules.TemporaryFolder;

 import java.io.File;
@@ -80,7 +77,16 @@ public class HdfsDataSegmentPusherTest
     DataSegment segment = pusher.push(segmentDir, segmentToPush);

     Assert.assertEquals(segmentToPush.getSize(), segment.getSize());
-
     Assert.assertEquals(segmentToPush, segment);
+    Assert.assertEquals(ImmutableMap.of(
+        "type",
+        "hdfs",
+        "path",
+        String.format("%s/%s/index.zip",
+                      config.getStorageDirectory(),
+                      DataSegmentPusherUtil.getHdfsStorageDir(segmentToPush)
+        )
+    ), segment.getLoadSpec());
+    // rename directory after push
     final String storageDir = DataSegmentPusherUtil.getHdfsStorageDir(segment);
     File indexFile = new File(String.format("%s/%s/index.zip", config.getStorageDirectory(), storageDir));
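The added assertion pins the published load spec to the final HDFS location rather than the temp file. An illustrative recomposition as it would appear inside the test method, reusing the test's own names (editor's sketch; the layout comment is an assumption, not from the commit):

    // Illustrative only: how the asserted path is composed.
    String expectedPath = String.format(
        "%s/%s/index.zip",
        config.getStorageDirectory(),                           // base dir from the pusher config
        DataSegmentPusherUtil.getHdfsStorageDir(segmentToPush)  // segment-relative dir (dataSource/interval/version/shard)
    );
    Assert.assertEquals(expectedPath, segment.getLoadSpec().get("path"));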
@@ -93,7 +99,8 @@ public class HdfsDataSegmentPusherTest
     outDir.setReadOnly();
     try {
       pusher.push(segmentDir, segmentToPush);
-    }catch (IOException e){
+    }
+    catch (IOException e) {
       Assert.fail("should not throw exception");
     }
   }
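Editor's note on why the read-only re-push must not throw: the pusher's comment above says "recreation is not an error", and Hadoop's FileSystem.mkdirs behaves like mkdir -p, succeeding when the directory already exists. A minimal runnable sketch of that semantics, assuming only hadoop-common on the classpath (the local filesystem stands in for HDFS):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MkdirsSketch
    {
      public static void main(String[] args) throws Exception
      {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path dir = new Path("/tmp/druid-mkdirs-sketch");
        System.out.println(fs.mkdirs(dir)); // true: created
        System.out.println(fs.mkdirs(dir)); // true: already exists, not an error
      }
    }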