fix logging of millisecond value in load field data
parent 82b36e5bb8
commit 0de8c06900
@@ -36,6 +36,7 @@ import org.elasticsearch.index.settings.IndexSettings;
 import java.io.IOException;
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
 
 /**
  *
@@ -130,7 +131,7 @@ public abstract class AbstractConcurrentMapFieldDataCache extends AbstractIndexC
             fieldDataCache.put(fieldName, fieldData);
             long took = System.nanoTime() - time;
             if (logger.isTraceEnabled()) {
-                logger.trace("loaded field [{}] for reader [{}], took [{}], took_millis [{}]", fieldName, reader, TimeValue.timeValueNanos(took), took / 1000);
+                logger.trace("loaded field [{}] for reader [{}], took [{}], took_millis [{}]", fieldName, reader, TimeValue.timeValueNanos(took), TimeUnit.NANOSECONDS.toMillis(took));
             }
         } catch (OutOfMemoryError e) {
             logger.warn("loading field [" + fieldName + "] caused out of memory failure", e);
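Note on the fix (not part of the commit): dividing a nanosecond duration by 1,000 yields microseconds, not milliseconds, so the old took_millis value was off by a factor of 1,000. TimeUnit.NANOSECONDS.toMillis divides by 1,000,000 and produces the intended value. A minimal standalone sketch of the difference, using a hypothetical class name and an arbitrary sample duration:

import java.util.concurrent.TimeUnit;

// Hypothetical example class, not part of the Elasticsearch source.
public class TookMillisExample {
    public static void main(String[] args) {
        long took = 1_500_000_000L; // sample duration: 1.5 seconds in nanoseconds

        long oldValue = took / 1000;                         // 1_500_000 -- actually microseconds
        long newValue = TimeUnit.NANOSECONDS.toMillis(took); // 1_500     -- correct milliseconds

        System.out.println("took / 1000                         = " + oldValue);
        System.out.println("TimeUnit.NANOSECONDS.toMillis(took) = " + newValue);
    }
}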