Avoid using Guava in DataSegmentPushers because of incompatibilities (#4391)

* Avoid using Guava in DataSegmentPushers because of Hadoop incompatibilities

* Clarify comments
Author: Roman Leventov, 2017-06-12 11:58:34 -05:00 (committed by Charles Allen)
parent 5285eb961b
commit c121845102
4 changed files with 17 additions and 19 deletions
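
The change is the same in all four pushers: the descriptor bytes produced by Jackson are written with java.nio.file.Files, part of the JDK since Java 7, instead of Guava helpers or a hand-rolled stream. A minimal, self-contained sketch of the substitution (the payload and file names are made up for illustration):

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

public class DescriptorWriteSketch
{
  public static void main(String[] args) throws IOException
  {
    // Stand-in for jsonMapper.writeValueAsBytes(segment).
    byte[] descriptorBytes = "{\"id\":\"example_segment\"}".getBytes(StandardCharsets.UTF_8);
    File descriptorFile = File.createTempFile("descriptor", ".json");

    // Before: Guava's Files.copy(ByteStreams.newInputStreamSupplier(bytes), file)
    // in the S3 and local pushers, or an explicit FileOutputStream block in the
    // CloudFiles and Google pushers.
    // After: one JDK call with no third-party dependency.
    Files.write(descriptorFile.toPath(), descriptorBytes);
  }
}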

CloudFilesDataSegmentPusher.java

@@ -23,7 +23,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
 import com.google.inject.Inject;
 import io.druid.java.util.common.CompressionUtils;
 import io.druid.java.util.common.logger.Logger;
 import io.druid.segment.SegmentUtils;
@@ -32,9 +31,9 @@ import io.druid.timeline.DataSegment;
 import org.jclouds.rackspace.cloudfiles.v1.CloudFilesApi;

 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.Files;
 import java.util.Map;
 import java.util.concurrent.Callable;
@@ -102,9 +101,9 @@ public class CloudFilesDataSegmentPusher implements DataSegmentPusher
           log.info("Pushing %s.", segmentData.getPath());
           objectApi.put(segmentData);

-          try (FileOutputStream stream = new FileOutputStream(descFile)) {
-            stream.write(jsonMapper.writeValueAsBytes(inSegment));
-          }
+          // Avoid using Guava in DataSegmentPushers, because they might be used with very diverse Guava
+          // versions at runtime, and because Guava deletes methods over time, which causes incompatibilities.
+          Files.write(descFile.toPath(), jsonMapper.writeValueAsBytes(inSegment));

           CloudFilesObject descriptorData = new CloudFilesObject(
               segmentPath, descFile,
               objectApi.getRegion(), objectApi.getContainer()
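
In the CloudFiles pusher (and the Google pusher below) the replaced code was not Guava at all but an explicit stream; the NIO call is behaviorally equivalent here. A sketch of the two forms, assuming a descFile and bytes like those in the hunk above:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;

class WriteEquivalenceSketch
{
  static void writeOld(File descFile, byte[] bytes) throws IOException
  {
    // Removed form: explicit open/write/close via try-with-resources.
    try (FileOutputStream stream = new FileOutputStream(descFile)) {
      stream.write(bytes);
    }
  }

  static void writeNew(File descFile, byte[] bytes) throws IOException
  {
    // New form: Files.write opens with its documented defaults
    // (CREATE, TRUNCATE_EXISTING, WRITE), writes all bytes, and closes
    // the channel even if the write throws.
    Files.write(descFile.toPath(), bytes);
  }
}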

GoogleDataSegmentPusher.java

@@ -34,9 +34,9 @@ import io.druid.timeline.DataSegment;

 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.Files;
 import java.util.Map;

 public class GoogleDataSegmentPusher implements DataSegmentPusher
@@ -78,10 +78,9 @@
       throws IOException
   {
     File descriptorFile = File.createTempFile("descriptor", ".json");
-    try (FileOutputStream stream = new FileOutputStream(descriptorFile)) {
-      stream.write(jsonMapper.writeValueAsBytes(segment));
-    }
+    // Avoid using Guava in DataSegmentPushers, because they might be used with very diverse Guava
+    // versions at runtime, and because Guava deletes methods over time, which causes incompatibilities.
+    Files.write(descriptorFile.toPath(), jsonMapper.writeValueAsBytes(segment));

     return descriptorFile;
   }

S3DataSegmentPusher.java

@@ -22,11 +22,8 @@ package io.druid.storage.s3;

 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.Files;
 import com.google.inject.Inject;
 import com.metamx.emitter.EmittingLogger;
 import io.druid.java.util.common.CompressionUtils;
 import io.druid.segment.SegmentUtils;
 import io.druid.segment.loading.DataSegmentPusher;
@@ -39,6 +36,7 @@ import org.jets3t.service.model.S3Object;

 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.Files;
 import java.util.Map;
 import java.util.concurrent.Callable;
@@ -116,7 +114,9 @@ public class S3DataSegmentPusher implements DataSegmentPusher
                 .withBinaryVersion(SegmentUtils.getVersionFromDir(indexFilesDir));

             File descriptorFile = File.createTempFile("druid", "descriptor.json");
-            Files.copy(ByteStreams.newInputStreamSupplier(jsonMapper.writeValueAsBytes(outSegment)), descriptorFile);
+            // Avoid using Guava in DataSegmentPushers, because they might be used with very diverse Guava
+            // versions at runtime, and because Guava deletes methods over time, which causes incompatibilities.
+            Files.write(descriptorFile.toPath(), jsonMapper.writeValueAsBytes(outSegment));
             S3Object descriptorObject = new S3Object(descriptorFile);
             descriptorObject.setBucketName(outputBucket);
             descriptorObject.setKey(s3DescriptorPath);
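
The S3 pusher (and the local pusher below) did use Guava, via the InputSupplier-based ByteStreams.newInputStreamSupplier plus Files.copy. That family of methods was deprecated and later deleted from Guava, so running against a different Guava version on the classpath — Hadoop, for one, pins an old release — can fail with NoSuchMethodError at runtime. A sketch of the replacement; the old form is left in a comment since it no longer compiles against current Guava:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

class S3DescriptorSketch
{
  static File writeDescriptor(byte[] descriptorBytes) throws IOException
  {
    File descriptorFile = File.createTempFile("druid", "descriptor.json");

    // Removed form (only compiles against old Guava; the InputSupplier-based
    // overloads were deprecated and later deleted):
    //   com.google.common.io.Files.copy(
    //       ByteStreams.newInputStreamSupplier(descriptorBytes), descriptorFile);

    // Replacement: the JDK call, immune to Guava version skew.
    Files.write(descriptorFile.toPath(), descriptorBytes);
    return descriptorFile;
  }
}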

LocalDataSegmentPusher.java

@@ -21,10 +21,7 @@ package io.druid.segment.loading;

 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.Files;
 import com.google.inject.Inject;
 import io.druid.java.util.common.CompressionUtils;
 import io.druid.java.util.common.logger.Logger;
 import io.druid.segment.SegmentUtils;
@@ -35,6 +32,7 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.nio.file.FileAlreadyExistsException;
+import java.nio.file.Files;
 import java.util.Map;
 import java.util.UUID;
@@ -110,7 +108,7 @@ public class LocalDataSegmentPusher implements DataSegmentPusher
     // will fail and read the descriptor.json created by the current push operation directly
     FileUtils.forceMkdir(outDir.getParentFile());
     try {
-      java.nio.file.Files.move(tmpOutDir.toPath(), outDir.toPath());
+      Files.move(tmpOutDir.toPath(), outDir.toPath());
     }
     catch (FileAlreadyExistsException e) {
       log.warn("Push destination directory[%s] exists, ignore this message if replication is configured.", outDir);
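
The only change in this hunk is dropping the now-redundant java.nio.file qualifier, but the surrounding pattern is worth spelling out: without REPLACE_EXISTING, Files.move refuses to clobber an existing destination and throws FileAlreadyExistsException, which the pusher treats as a benign sign of replication. A condensed sketch of that pattern (names are illustrative):

import java.io.File;
import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;

class MoveIntoPlaceSketch
{
  static void moveIntoPlace(File tmpOutDir, File outDir) throws IOException
  {
    try {
      // No REPLACE_EXISTING option: if another replica already pushed the
      // same segment, the move fails instead of clobbering it.
      Files.move(tmpOutDir.toPath(), outDir.toPath());
    }
    catch (FileAlreadyExistsException e) {
      // Benign when replication is configured; the existing copy wins.
    }
  }
}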
@@ -143,7 +141,9 @@ public class LocalDataSegmentPusher implements DataSegmentPusher
   {
     File descriptorFile = new File(outDir, "descriptor.json");
     log.info("Creating descriptor file at[%s]", descriptorFile);
-    Files.copy(ByteStreams.newInputStreamSupplier(jsonMapper.writeValueAsBytes(segment)), descriptorFile);
+    // Avoid using Guava in DataSegmentPushers, because they might be used with very diverse Guava
+    // versions at runtime, and because Guava deletes methods over time, which causes incompatibilities.
+    Files.write(descriptorFile.toPath(), jsonMapper.writeValueAsBytes(segment));
     return segment;
   }
 }
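
Taken together, descriptor handling now needs only Jackson and the JDK. A hypothetical round-trip, using a plain Map in place of Druid's DataSegment so the sketch stays self-contained:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;

class DescriptorRoundTripSketch
{
  public static void main(String[] args) throws IOException
  {
    ObjectMapper jsonMapper = new ObjectMapper();
    Map<String, Object> segment = new HashMap<>();
    segment.put("id", "example_segment");
    segment.put("size", 1024);

    // Write the descriptor the same way the pushers now do.
    File descriptorFile = File.createTempFile("descriptor", ".json");
    Files.write(descriptorFile.toPath(), jsonMapper.writeValueAsBytes(segment));

    // Read it back to confirm the bytes round-trip cleanly.
    Map<?, ?> readBack = jsonMapper.readValue(Files.readAllBytes(descriptorFile.toPath()), Map.class);
    System.out.println(readBack);
  }
}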