Using annotation to distinguish Hadoop Configuration in each module (#9013)

* Multibinding for NodeRole

* Fix endpoints

* fix doc

* fix test

* Using annotation to distinguish Hadoop Configuration in each module
Jihoon Son, 2019-12-11 17:30:44 -08:00 (committed by GitHub)
parent e5e1e9c4ee
commit 66056b2826
18 changed files with 163 additions and 17 deletions

HdfsFirehoseFactory.java

@@ -27,6 +27,7 @@ import org.apache.druid.data.input.FiniteFirehoseFactory;
import org.apache.druid.data.input.InputSplit;
import org.apache.druid.data.input.impl.StringInputRowParser;
import org.apache.druid.data.input.impl.prefetch.PrefetchableTextFilesFirehoseFactory;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.inputsource.hdfs.HdfsInputSource;
import org.apache.druid.storage.hdfs.HdfsDataSegmentPuller;
import org.apache.druid.utils.CompressionUtils;
@@ -46,7 +47,7 @@ public class HdfsFirehoseFactory extends PrefetchableTextFilesFirehoseFactory<Pa
@JsonCreator
public HdfsFirehoseFactory(
@JacksonInject Configuration conf,
@JacksonInject @Hdfs Configuration conf,
@JsonProperty("paths") Object inputPaths,
@JsonProperty("maxCacheCapacityBytes") Long maxCacheCapacityBytes,
@JsonProperty("maxFetchCapacityBytes") Long maxFetchCapacityBytes,

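Note on the @JacksonInject @Hdfs pair above: HdfsFirehoseFactory is created by Jackson during JSON deserialization, and its @JacksonInject parameters are resolved out of the Guice injector, so the lookup key has to carry the same @Hdfs qualifier as the binding registered by the HDFS module. The sketch below shows one way a Guice-backed InjectableValues could do that resolution; the class name and the assumption that Jackson passes a Guice Key as the value id are illustrative, since the actual bridge code is not part of this diff.

import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.InjectableValues;
import com.google.inject.Injector;
import com.google.inject.Key;

// Illustrative only: resolves @JacksonInject values by asking the Guice injector.
public class GuiceBackedInjectableValues extends InjectableValues
{
  private final Injector injector;

  public GuiceBackedInjectableValues(Injector injector)
  {
    this.injector = injector;
  }

  @Override
  public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance)
  {
    // Assumed: valueId arrives as a Guice Key. For the constructor above it would be
    // Key.get(Configuration.class, Hdfs.class), so the HDFS-qualified instance is returned
    // rather than whatever another extension bound as a plain Configuration.
    return injector.getInstance((Key<?>) valueId);
  }
}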
Hdfs.java (new file)

@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.guice;
import com.google.inject.BindingAnnotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Each extension module needs to properly bind whatever it will use, but sometimes different modules need to bind the
* same class which will lead to the duplicate injection error. To avoid this problem, each module is supposed to bind
* different instances. This is a binding annotation for druid-hdfs-storage extension. Any binding for the same type
* should be distinguished by using this annotation.
*/
@Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@BindingAnnotation
public @interface Hdfs
{
}

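The annotation above is the core of the change. A minimal, self-contained sketch of how it is meant to be used (toy module and component names and an illustrative config value; not Druid code):

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Key;
import org.apache.druid.guice.Hdfs;
import org.apache.hadoop.conf.Configuration;

public class HdfsAnnotationSketch
{
  static class HdfsModule extends AbstractModule
  {
    @Override
    protected void configure()
    {
      Configuration conf = new Configuration();
      conf.set("fs.defaultFS", "hdfs://namenode.example:8020"); // illustrative value
      // Qualified binding: cannot collide with another module's Configuration binding.
      bind(Configuration.class).annotatedWith(Hdfs.class).toInstance(conf);
    }
  }

  static class SomeHdfsComponent
  {
    final Configuration conf;

    @Inject
    SomeHdfsComponent(@Hdfs Configuration conf)
    {
      this.conf = conf;
    }
  }

  public static void main(String[] args)
  {
    Injector injector = Guice.createInjector(new HdfsModule());
    // Both lookups resolve to the single @Hdfs-qualified instance.
    Configuration byKey = injector.getInstance(Key.get(Configuration.class, Hdfs.class));
    Configuration byConstructor = injector.getInstance(SomeHdfsComponent.class).conf;
    System.out.println(byKey == byConstructor); // prints true
  }
}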
HdfsInputSource.java

@@ -32,6 +32,7 @@ import org.apache.druid.data.input.InputSplit;
import org.apache.druid.data.input.SplitHintSpec;
import org.apache.druid.data.input.impl.InputEntityIteratingReader;
import org.apache.druid.data.input.impl.SplittableInputSource;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.java.util.common.IAE;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -72,7 +73,7 @@ public class HdfsInputSource extends AbstractInputSource implements SplittableIn
@JsonCreator
public HdfsInputSource(
@JsonProperty(PROP_PATHS) Object inputPaths,
@JacksonInject Configuration configuration
@JacksonInject @Hdfs Configuration configuration
)
{
this.inputPaths = coerceInputPathsToList(inputPaths, PROP_PATHS);

HdfsDataSegmentKiller.java

@@ -22,6 +22,7 @@ package org.apache.druid.storage.hdfs;
import com.google.common.base.Preconditions;
import com.google.inject.Inject;
import org.apache.commons.lang.StringUtils;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.segment.loading.DataSegmentKiller;
import org.apache.druid.segment.loading.SegmentLoadingException;
@@ -43,7 +44,7 @@ public class HdfsDataSegmentKiller implements DataSegmentKiller
private final Path storageDirectory;
@Inject
public HdfsDataSegmentKiller(final Configuration config, final HdfsDataSegmentPusherConfig pusherConfig)
public HdfsDataSegmentKiller(@Hdfs final Configuration config, final HdfsDataSegmentPusherConfig pusherConfig)
{
this.config = config;
this.storageDirectory = new Path(pusherConfig.getStorageDirectory());

HdfsDataSegmentPuller.java

@@ -22,6 +22,7 @@ package org.apache.druid.storage.hdfs;
import com.google.common.base.Predicate;
import com.google.common.io.ByteSource;
import com.google.inject.Inject;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.java.util.common.FileUtils;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.RetryUtils;
@@ -178,7 +179,7 @@ public class HdfsDataSegmentPuller implements URIDataPuller
protected final Configuration config;
@Inject
public HdfsDataSegmentPuller(final Configuration config)
public HdfsDataSegmentPuller(@Hdfs final Configuration config)
{
this.config = config;
}

HdfsDataSegmentPusher.java

@@ -26,6 +26,7 @@ import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.inject.Inject;
import org.apache.druid.common.utils.UUIDUtils;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.java.util.common.IOE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.logger.Logger;
@@ -60,7 +61,11 @@ public class HdfsDataSegmentPusher implements DataSegmentPusher
private final Supplier<String> fullyQualifiedStorageDirectory;
@Inject
public HdfsDataSegmentPusher(HdfsDataSegmentPusherConfig config, Configuration hadoopConfig, ObjectMapper jsonMapper)
public HdfsDataSegmentPusher(
HdfsDataSegmentPusherConfig config,
@Hdfs Configuration hadoopConfig,
ObjectMapper jsonMapper
)
{
this.hadoopConfig = hadoopConfig;
this.jsonMapper = jsonMapper;

HdfsFileTimestampVersionFinder.java

@@ -21,6 +21,7 @@ package org.apache.druid.storage.hdfs;
import com.google.inject.Inject;
import org.apache.druid.data.SearchableVersionedDataFinder;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.java.util.common.RetryUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -36,7 +37,7 @@ import java.util.regex.Pattern;
public class HdfsFileTimestampVersionFinder extends HdfsDataSegmentPuller implements SearchableVersionedDataFinder<URI>
{
@Inject
public HdfsFileTimestampVersionFinder(Configuration config)
public HdfsFileTimestampVersionFinder(@Hdfs Configuration config)
{
super(config);
}

HdfsStorageAuthentication.java

@@ -22,6 +22,7 @@ package org.apache.druid.storage.hdfs;
import com.google.common.base.Strings;
import com.google.inject.Inject;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.guice.ManageLifecycle;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.lifecycle.LifecycleStart;
@@ -40,7 +41,7 @@ public class HdfsStorageAuthentication
private final Configuration hadoopConf;
@Inject
public HdfsStorageAuthentication(HdfsKerberosConfig hdfsKerberosConfig, Configuration hadoopConf)
public HdfsStorageAuthentication(HdfsKerberosConfig hdfsKerberosConfig, @Hdfs Configuration hadoopConf)
{
this.hdfsKerberosConfig = hdfsKerberosConfig;
this.hadoopConf = hadoopConf;

HdfsStorageDruidModule.java

@@ -28,6 +28,7 @@ import com.google.inject.multibindings.MapBinder;
import org.apache.druid.data.SearchableVersionedDataFinder;
import org.apache.druid.firehose.hdfs.HdfsFirehoseFactory;
import org.apache.druid.guice.Binders;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.guice.JsonConfigProvider;
import org.apache.druid.guice.LazySingleton;
import org.apache.druid.guice.LifecycleModule;
@@ -108,7 +109,7 @@ public class HdfsStorageDruidModule implements DruidModule
}
}
binder.bind(Configuration.class).toInstance(conf);
binder.bind(Configuration.class).annotatedWith(Hdfs.class).toInstance(conf);
JsonConfigProvider.bind(binder, "druid.storage", HdfsDataSegmentPusherConfig.class);
Binders.taskLogsBinder(binder).addBinding("hdfs").to(HdfsTaskLogs.class);
@@ -117,6 +118,5 @@ public class HdfsStorageDruidModule implements DruidModule
JsonConfigProvider.bind(binder, "druid.hadoop.security.kerberos", HdfsKerberosConfig.class);
binder.bind(HdfsStorageAuthentication.class).in(ManageLifecycle.class);
LifecycleModule.register(binder, HdfsStorageAuthentication.class);
}
}

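For contrast, this is the failure mode the annotated bindings avoid: when two extension modules both bind the raw Configuration type, Guice rejects the injector at creation time (toy modules below; the error wording is Guice's own, not quoted from Druid).

import com.google.inject.AbstractModule;
import com.google.inject.CreationException;
import com.google.inject.Guice;
import org.apache.hadoop.conf.Configuration;

public class DuplicateBindingSketch
{
  static class ModuleA extends AbstractModule
  {
    @Override
    protected void configure()
    {
      bind(Configuration.class).toInstance(new Configuration());
    }
  }

  static class ModuleB extends AbstractModule
  {
    @Override
    protected void configure()
    {
      bind(Configuration.class).toInstance(new Configuration());
    }
  }

  public static void main(String[] args)
  {
    try {
      Guice.createInjector(new ModuleA(), new ModuleB());
    }
    catch (CreationException e) {
      // Guice reports that a binding for Configuration was already configured.
      System.out.println("duplicate binding rejected: " + e.getMessage());
    }
    // Annotating each module's binding (@Hdfs, @Orc, @Parquet) gives it a distinct key,
    // so all of the Configurations can live in the same injector.
  }
}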
HdfsTaskLogs.java

@@ -23,6 +23,7 @@ import com.google.common.base.Optional;
import com.google.common.io.ByteSource;
import com.google.common.io.ByteStreams;
import com.google.inject.Inject;
import org.apache.druid.guice.Hdfs;
import org.apache.druid.java.util.common.IOE;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.tasklogs.TaskLogs;
@@ -51,7 +52,7 @@ public class HdfsTaskLogs implements TaskLogs
private final Configuration hadoopConfig;
@Inject
public HdfsTaskLogs(HdfsTaskLogsConfig config, Configuration hadoopConfig)
public HdfsTaskLogs(HdfsTaskLogsConfig config, @Hdfs Configuration hadoopConfig)
{
this.config = config;
this.hadoopConfig = hadoopConfig;

OrcExtensionsModule.java

@@ -24,6 +24,7 @@ import com.fasterxml.jackson.databind.jsontype.NamedType;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.google.inject.Binder;
import com.google.inject.Inject;
import org.apache.druid.data.input.orc.guice.Orc;
import org.apache.druid.initialization.DruidModule;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -89,6 +90,6 @@ public class OrcExtensionsModule implements DruidModule
}
}
binder.bind(Configuration.class).toInstance(conf);
binder.bind(Configuration.class).annotatedWith(Orc.class).toInstance(conf);
}
}

OrcInputFormat.java

@@ -26,6 +26,7 @@ import org.apache.druid.data.input.InputEntity;
import org.apache.druid.data.input.InputEntityReader;
import org.apache.druid.data.input.InputRowSchema;
import org.apache.druid.data.input.impl.NestedInputFormat;
import org.apache.druid.data.input.orc.guice.Orc;
import org.apache.druid.java.util.common.parsers.JSONPathSpec;
import org.apache.hadoop.conf.Configuration;
@@ -42,7 +43,7 @@ public class OrcInputFormat extends NestedInputFormat
public OrcInputFormat(
@JsonProperty("flattenSpec") @Nullable JSONPathSpec flattenSpec,
@JsonProperty("binaryAsString") @Nullable Boolean binaryAsString,
@JacksonInject Configuration conf
@JacksonInject @Orc Configuration conf
)
{
super(flattenSpec);

Orc.java (new file)

@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.data.input.orc.guice;
import com.google.inject.BindingAnnotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Each extension module needs to properly bind whatever it will use, but sometimes different modules need to bind the
* same class which will lead to the duplicate injection error. To avoid this problem, each module is supposed to bind
* different instances. This is a binding annotation for druid-orc-extensions extension. Any binding for the same type
* should be distinguished by using this annotation.
*/
@Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@BindingAnnotation
public @interface Orc
{
}

ParquetExtensionsModule.java

@@ -25,6 +25,7 @@ import com.fasterxml.jackson.databind.module.SimpleModule;
import com.google.inject.Binder;
import com.google.inject.Inject;
import org.apache.druid.data.input.parquet.avro.ParquetAvroHadoopInputRowParser;
import org.apache.druid.data.input.parquet.guice.Parquet;
import org.apache.druid.data.input.parquet.simple.ParquetHadoopInputRowParser;
import org.apache.druid.data.input.parquet.simple.ParquetParseSpec;
import org.apache.druid.initialization.DruidModule;
@@ -98,6 +99,6 @@ public class ParquetExtensionsModule implements DruidModule
}
}
binder.bind(Configuration.class).toInstance(conf);
binder.bind(Configuration.class).annotatedWith(Parquet.class).toInstance(conf);
}
}

ParquetInputFormat.java

@@ -19,13 +19,16 @@
package org.apache.druid.data.input.parquet;
import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.druid.data.input.InputEntity;
import org.apache.druid.data.input.InputEntityReader;
import org.apache.druid.data.input.InputRowSchema;
import org.apache.druid.data.input.impl.NestedInputFormat;
import org.apache.druid.data.input.parquet.guice.Parquet;
import org.apache.druid.java.util.common.parsers.JSONPathSpec;
import org.apache.hadoop.conf.Configuration;
import javax.annotation.Nullable;
import java.io.File;
@@ -35,15 +38,18 @@ import java.util.Objects;
public class ParquetInputFormat extends NestedInputFormat
{
private final boolean binaryAsString;
private final Configuration conf;
@JsonCreator
public ParquetInputFormat(
@JsonProperty("flattenSpec") @Nullable JSONPathSpec flattenSpec,
@JsonProperty("binaryAsString") @Nullable Boolean binaryAsString
@JsonProperty("binaryAsString") @Nullable Boolean binaryAsString,
@JacksonInject @Parquet Configuration conf
)
{
super(flattenSpec);
this.binaryAsString = binaryAsString == null ? false : binaryAsString;
this.conf = conf;
}
@JsonProperty
@@ -65,7 +71,7 @@ public class ParquetInputFormat extends NestedInputFormat
File temporaryDirectory
) throws IOException
{
return new ParquetReader(inputRowSchema, source, temporaryDirectory, getFlattenSpec(), binaryAsString);
return new ParquetReader(conf, inputRowSchema, source, temporaryDirectory, getFlattenSpec(), binaryAsString);
}
@Override

ParquetReader.java

@@ -31,6 +31,7 @@ import org.apache.druid.java.util.common.parsers.JSONPathSpec;
import org.apache.druid.java.util.common.parsers.ObjectFlattener;
import org.apache.druid.java.util.common.parsers.ObjectFlatteners;
import org.apache.druid.java.util.common.parsers.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.example.GroupReadSupport;
@@ -51,6 +52,7 @@ public class ParquetReader extends IntermediateRowParsingReader<Group>
private final Closer closer;
ParquetReader(
Configuration conf,
InputRowSchema inputRowSchema,
InputEntity source,
File temporaryDirectory,
@@ -69,7 +71,9 @@ public class ParquetReader extends IntermediateRowParsingReader<Group>
final ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
reader = closer.register(org.apache.parquet.hadoop.ParquetReader.builder(new GroupReadSupport(), path).build());
reader = closer.register(org.apache.parquet.hadoop.ParquetReader.builder(new GroupReadSupport(), path)
.withConf(conf)
.build());
}
finally {
Thread.currentThread().setContextClassLoader(currentClassLoader);

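The .withConf(conf) call above is what makes the injected @Parquet Configuration take effect: the builder otherwise uses its own default Configuration. A standalone sketch (illustrative path and setting) of reading a Parquet file with an explicit Hadoop Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.example.GroupReadSupport;

public class ParquetConfSketch
{
  public static void main(String[] args) throws Exception
  {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "file:///"); // illustrative: filesystem settings travel with the conf
    try (ParquetReader<Group> reader =
             ParquetReader.builder(new GroupReadSupport(), new Path("/tmp/example.parquet"))
                          .withConf(conf)
                          .build()) {
      for (Group row = reader.read(); row != null; row = reader.read()) {
        System.out.println(row);
      }
    }
  }
}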
Parquet.java (new file)

@@ -0,0 +1,40 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.data.input.parquet.guice;
import com.google.inject.BindingAnnotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Each extension module needs to properly bind whatever it will use, but sometimes different modules need to bind the
* same class which will lead to the duplicate injection error. To avoid this problem, each module is supposed to bind
* different instances. This is a binding annotation for druid-parquet-extensions extension. Any binding for the same
* type should be distinguished by using this annotation.
*/
@Target({ElementType.FIELD, ElementType.PARAMETER, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
@BindingAnnotation
public @interface Parquet
{
}

BaseParquetReaderTest.java

@@ -28,6 +28,7 @@ import org.apache.druid.data.input.InputRowSchema;
import org.apache.druid.data.input.impl.FileEntity;
import org.apache.druid.java.util.common.parsers.CloseableIterator;
import org.apache.druid.java.util.common.parsers.JSONPathSpec;
import org.apache.hadoop.conf.Configuration;
import java.io.File;
import java.io.IOException;
@@ -51,7 +52,7 @@ class BaseParquetReaderTest
) throws IOException
{
FileEntity entity = new FileEntity(new File(parquetFile));
ParquetInputFormat parquet = new ParquetInputFormat(flattenSpec, binaryAsString);
ParquetInputFormat parquet = new ParquetInputFormat(flattenSpec, binaryAsString, new Configuration());
return parquet.createReader(schema, entity, null);
}