mirror of https://github.com/apache/druid.git
remove unnecessary log that potentially takes up lots of heap (#5286)
This commit is contained in:
parent
cc32640642
commit
d54330be76
@@ -573,13 +573,6 @@ public class HadoopDruidIndexerConfig
   public void verify()
   {
-    try {
-      log.info("Running with config:%n%s", JSON_MAPPER.writerWithDefaultPrettyPrinter().writeValueAsString(this));
-    }
-    catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-
     Preconditions.checkNotNull(schema.getDataSchema().getDataSource(), "dataSource");
     Preconditions.checkNotNull(schema.getDataSchema().getParser().getParseSpec(), "parseSpec");
     Preconditions.checkNotNull(schema.getDataSchema().getParser().getParseSpec().getTimestampSpec(), "timestampSpec");
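Why the removed log line mattered for heap: writerWithDefaultPrettyPrinter().writeValueAsString(this) renders the entire indexing config as one pretty-printed JSON String before the logger ever sees it, so a large ingestion spec produces an equally large transient allocation. A minimal standalone sketch of that allocation pattern (hypothetical ConfigLogCost/Spec names, not Druid code):

import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.ArrayList;
import java.util.List;

public class ConfigLogCost
{
  // Stands in for a large indexing spec, e.g. one listing thousands of segments.
  static class Spec
  {
    public List<String> segments = new ArrayList<>();
  }

  public static void main(String[] args) throws Exception
  {
    Spec spec = new Spec();
    for (int i = 0; i < 100_000; i++) {
      spec.segments.add("dataSource_2018-01-01T00:00:00.000Z_2018-01-02T00:00:00.000Z_v1_" + i);
    }

    ObjectMapper mapper = new ObjectMapper();
    // The pattern the commit removes: the whole spec becomes a single
    // pretty-printed String (several megabytes here) just to emit a log line.
    String rendered = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(spec);
    System.out.printf("pretty-printed config is %.1f MB%n", rendered.length() / 1e6);
  }
}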
@@ -81,7 +81,10 @@ public class DatasourceInputFormat extends InputFormat<NullWritable, InputRow>
       throw new ISE("No segments found to read");
     }
 
-    logger.info("segments to read [%s]", segmentsStr);
+    // Note: log is split into two lines so that a new String is not generated to print it.
+    // segmentsStr could be quite large when re-indexing multiple months of data.
+    logger.info("Segments to read are...");
+    logger.info(segmentsStr);
 
     long maxSize = conf.getLong(CONF_MAX_SPLIT_SIZE, 0);
     if (maxSize < 0) {
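The comment in the hunk above is the whole point of the change: logger.info("segments to read [%s]", segmentsStr) formats the template and the argument into a brand-new String roughly as large as segmentsStr itself, briefly doubling the memory held for the message, while logging the prefix and the existing String as two records allocates no copy. A minimal sketch of the difference (hypothetical SplitLogSketch name; plain String.format and System.out stand in for the logger):

public class SplitLogSketch
{
  public static void main(String[] args)
  {
    // Stands in for segmentsStr, which can be very large when re-indexing
    // multiple months of data. (String.repeat requires Java 11+.)
    String segmentsStr = "segment-0 ".repeat(1_000_000);

    // Before: formatting copies segmentsStr into a new, slightly larger String.
    String formatted = String.format("segments to read [%s]", segmentsStr);
    System.out.println("formatted copy adds " + formatted.length() + " chars of transient heap");

    // After: the prefix and the existing String would be emitted separately,
    // so no second large String is allocated. (Length printed here instead of
    // dumping the ~10 MB payload to stdout.)
    System.out.println("Segments to read are...");
    System.out.println("payload length: " + segmentsStr.length());
  }
}

The trade-off is two log records instead of one, in exchange for never materializing a second multi-megabyte String.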