mirror of https://github.com/apache/druid.git
add missing dynamic coordinator configs (#8090)
parent 15fbf5983d
commit b80f20f769
@@ -25,7 +25,6 @@ import { HashRouter, Route, Switch } from 'react-router-dom';
import { ExternalLink, HeaderActiveTab, HeaderBar, Loader } from './components';
import { AppToaster } from './singletons/toaster';
import { UrlBaser } from './singletons/url-baser';
import { QueryManager } from './utils';
import { DRUID_DOCS_API, DRUID_DOCS_SQL } from './variables';
import {

@@ -45,9 +44,6 @@ type Capabilities = 'working-with-sql' | 'working-without-sql' | 'broken';
export interface ConsoleApplicationProps {
  hideLegacy: boolean;
  baseURL?: string;
  customHeaderName?: string;
  customHeaderValue?: string;
}

export interface ConsoleApplicationState {

@@ -125,14 +121,6 @@ export class ConsoleApplication extends React.PureComponent<
      capabilitiesLoading: true,
    };

    if (props.baseURL) {
      axios.defaults.baseURL = props.baseURL;
      UrlBaser.baseURL = props.baseURL;
    }
    if (props.customHeaderName && props.customHeaderValue) {
      axios.defaults.headers.common[props.customHeaderName] = props.customHeaderValue;
    }

    this.capabilitiesQueryManager = new QueryManager({
      processQuery: async () => {
        const capabilities = await ConsoleApplication.discoverCapabilities();
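The constructor hunk above is where the console wires its HTTP defaults: a base URL and a custom header are written onto the shared axios instance once, so every later request inherits them. A minimal standalone sketch of that pattern follows; the paths and header names are hypothetical, not taken from this commit.

import axios from 'axios';

// Hypothetical config values, mirroring the baseURL / custom-header shape used in the hunk above.
const config = {
  baseURL: '/proxy/coordinator', // illustrative path
  customHeaderName: 'X-Example-Header', // illustrative header name
  customHeaderValue: 'example-value',
};

if (config.baseURL) {
  // Relative URLs in later axios calls resolve against this base.
  axios.defaults.baseURL = config.baseURL;
}
if (config.customHeaderName && config.customHeaderValue) {
  // Headers under `common` are attached to requests of every method.
  axios.defaults.headers.common[config.customHeaderName] = config.customHeaderValue;
}

// This GET now goes to '/proxy/coordinator/status' with the custom header set.
axios.get('/status').then(res => console.log(res.status));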
@@ -16,7 +16,7 @@
 * limitations under the License.
 */

import { Intent } from '@blueprintjs/core';
import { Code, Intent } from '@blueprintjs/core';
import { IconNames } from '@blueprintjs/icons';
import axios from 'axios';
import React from 'react';

@@ -138,50 +138,153 @@ export class CoordinatorDynamicConfigDialog extends React.PureComponent<
{
  name: 'balancerComputeThreads',
  type: 'number',
  defaultValue: 1,
  info: (
    <>
      Thread pool size for computing the moving cost of segments during segment balancing.
      Consider increasing this if you have a lot of segments and moving segments starts
      to get stuck.
    </>
  ),
},
{
  name: 'emitBalancingStats',
  type: 'boolean',
  info: (
    <>
      Boolean flag for whether or not we should emit balancing stats. This is an
      expensive operation.
    </>
  ),
},
{
  name: 'killAllDataSources',
  type: 'boolean',
  info: (
    <>
      Send kill tasks for ALL dataSources if property{' '}
      <Code>druid.coordinator.kill.on</Code> is true. If this is set to true then{' '}
      <Code>killDataSourceWhitelist</Code> must not be specified or must be an empty list.
    </>
  ),
},
{
  name: 'killDataSourceWhitelist',
  type: 'string-array',
  info: (
    <>
      List of dataSources for which kill tasks are sent if property{' '}
      <Code>druid.coordinator.kill.on</Code> is true. This can be a list of
      comma-separated dataSources or a JSON array.
    </>
  ),
},
{
  name: 'killPendingSegmentsSkipList',
  type: 'string-array',
  info: (
    <>
      List of dataSources for which pendingSegments are NOT cleaned up if property{' '}
      <Code>druid.coordinator.kill.pendingSegments.on</Code> is true. This can be a list
      of comma-separated dataSources or a JSON array.
    </>
  ),
},
{
  name: 'maxSegmentsInNodeLoadingQueue',
  type: 'number',
  defaultValue: 0,
  info: (
    <>
      The maximum number of segments that can be queued for loading to any given
      server. This parameter can be used to speed up the segment loading process,
      especially if there are "slow" nodes in the cluster (with low loading speed) or if
      too many segments are scheduled to be replicated to some particular node (faster
      loading may be preferred to better segment distribution). The desired value depends
      on segment loading speed, acceptable replication time, and the number of nodes. A
      value of 1000 could be a starting point for a rather big cluster. The default value
      is 0 (the loading queue is unbounded).
    </>
  ),
},
{
  name: 'maxSegmentsToMove',
  type: 'number',
  defaultValue: 5,
  info: <>The maximum number of segments that can be moved at any given time.</>,
},
{
  name: 'mergeBytesLimit',
  type: 'size-bytes',
  defaultValue: 524288000,
  info: <>The maximum total uncompressed size in bytes of segments to merge.</>,
},
{
  name: 'mergeSegmentsLimit',
  type: 'number',
  defaultValue: 100,
  info: <>The maximum number of segments that can be in a single append task.</>,
},
{
  name: 'millisToWaitBeforeDeleting',
  type: 'number',
  defaultValue: 900000,
  info: (
    <>
      How long the Coordinator needs to be active before it can start removing
      (marking unused) segments in metadata storage.
    </>
  ),
},
{
  name: 'replicantLifetime',
  type: 'number',
  defaultValue: 15,
  info: (
    <>
      The maximum number of Coordinator runs for a segment to be replicated before we
      start alerting.
    </>
  ),
},
{
  name: 'replicationThrottleLimit',
  type: 'number',
  defaultValue: 10,
  info: <>The maximum number of segments that can be replicated at one time.</>,
},
{
  name: 'decommissioningNodes',
  type: 'string-array',
  info: (
    <>
      List of historical servers to 'decommission'. The Coordinator will not assign new
      segments to 'decommissioning' servers, and segments will be moved away from them
      to be placed on non-decommissioning servers at the maximum rate specified by{' '}
      <Code>decommissioningMaxPercentOfMaxSegmentsToMove</Code>.
    </>
  ),
},
{
  name: 'decommissioningMaxPercentOfMaxSegmentsToMove',
  type: 'number',
  defaultValue: 70,
  info: (
    <>
      The maximum number of segments that may be moved away from 'decommissioning'
      servers to non-decommissioning (that is, active) servers during one Coordinator
      run. This value is relative to the total maximum number of segment movements
      allowed during one run, which is determined by <Code>maxSegmentsToMove</Code>. If{' '}
      <Code>decommissioningMaxPercentOfMaxSegmentsToMove</Code> is 0, segments will
      neither be moved from nor to 'decommissioning' servers, effectively putting them in
      a sort of "maintenance" mode in which they do not participate in balancing or
      assignment by load rules. Decommissioning can also become stalled if there are no
      available active servers to place the segments on. By adjusting the maximum percent
      of decommissioning segment movements, an operator can prevent active servers from
      being overloaded by prioritizing balancing, or decrease decommissioning time
      instead. The value should be between 0 and 100.
    </>
  ),
},
]}
model={dynamicConfig}
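The fields above mirror Druid's coordinator dynamic configuration, which this dialog ultimately reads and writes over HTTP. As a rough sketch of what saving such a config could look like, here is an illustrative payload posted with axios; the endpoint is Druid's documented coordinator dynamic-config API, and the values are examples only, not defaults taken from this commit.

import axios from 'axios';

// Example payload; the keys match the dialog fields above, the values are illustrative.
const dynamicConfig = {
  balancerComputeThreads: 1,
  emitBalancingStats: false,
  killAllDataSources: false,
  killDataSourceWhitelist: ['unused_datasource'],
  killPendingSegmentsSkipList: [],
  maxSegmentsInNodeLoadingQueue: 1000,
  maxSegmentsToMove: 5,
  mergeBytesLimit: 524288000,
  mergeSegmentsLimit: 100,
  millisToWaitBeforeDeleting: 900000,
  replicantLifetime: 15,
  replicationThrottleLimit: 10,
  decommissioningNodes: ['historical-old-01:8083'],
  decommissioningMaxPercentOfMaxSegmentsToMove: 70,
};

async function saveCoordinatorDynamicConfig(): Promise<void> {
  // POST the full config object; the Coordinator replaces its dynamic config with this payload.
  await axios.post('/druid/coordinator/v1/config', dynamicConfig);
}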
@@ -16,6 +16,7 @@
 * limitations under the License.
 */

import axios from 'axios';
import 'brace'; // Import Ace editor and all the sub components used in the app
import 'brace/ext/language_tools';
import 'brace/theme/solarized_dark';

@@ -28,6 +29,7 @@ import './ace-modes/dsql';
import './ace-modes/hjson';
import './bootstrap/react-table-defaults';
import { ConsoleApplication } from './console-application';
import { UrlBaser } from './singletons/url-baser';

import './entry.scss';

@@ -40,6 +42,7 @@ interface ConsoleConfig {
  baseURL?: string;
  customHeaderName?: string;
  customHeaderValue?: string;
  customHeaders?: Record<string, string>;
}

const consoleConfig: ConsoleConfig = (window as any).consoleConfig;

@@ -47,12 +50,20 @@ if (typeof consoleConfig.title === 'string') {
  window.document.title = consoleConfig.title;
}

if (consoleConfig.baseURL) {
  axios.defaults.baseURL = consoleConfig.baseURL;
  UrlBaser.baseURL = consoleConfig.baseURL;
}
if (consoleConfig.customHeaderName && consoleConfig.customHeaderValue) {
  axios.defaults.headers.common[consoleConfig.customHeaderName] = consoleConfig.customHeaderValue;
}
if (consoleConfig.customHeaders) {
  Object.assign(axios.defaults.headers, consoleConfig.customHeaders);
}

ReactDOM.render(
  React.createElement(ConsoleApplication, {
    hideLegacy: Boolean(consoleConfig.hideLegacy),
    baseURL: consoleConfig.baseURL,
    customHeaderName: consoleConfig.customHeaderName,
    customHeaderValue: consoleConfig.customHeaderValue,
  }) as any,
  container,
);
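The new customHeaders option lets the page embedding the console attach several headers at once, alongside the existing single customHeaderName/customHeaderValue pair. A sketch of what such an embedding page might define before entry.ts runs; every name and value here is made up for illustration.

// Set by the hosting page as a global before the console bundle loads.
(window as any).consoleConfig = {
  title: 'Druid Console',               // optional: becomes document.title
  baseURL: '/proxy/router',             // illustrative base path for API calls
  customHeaders: {
    // Merged into axios.defaults.headers by the Object.assign call above.
    'X-Forwarded-User': 'console-user', // illustrative header
    'X-Request-Source': 'web-console',  // illustrative header
  },
};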
@@ -2669,6 +2669,7 @@ export class LoadDataView extends React.PureComponent<LoadDataViewProps, LoadDat
  )}
  <Button
    text="Submit"
    rightIcon={IconNames.CLOUD_UPLOAD}
    intent={Intent.PRIMARY}
    onClick={async () => {
      if (['index', 'index_parallel'].includes(deepGet(spec, 'type'))) {
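The guard above distinguishes native batch spec types ('index' and 'index_parallel') before submitting. A hedged sketch of what the submit branch could do, assuming the documented Overlord task endpoint rather than the view's actual helper functions:

import axios from 'axios';

// Rough sketch only: submit a native batch ingestion spec as a task.
// The endpoint is the documented Overlord task API; the real view goes
// through its own helpers and error handling.
async function submitBatchSpec(spec: Record<string, any>): Promise<string | undefined> {
  if (['index', 'index_parallel'].includes(spec.type)) {
    const resp = await axios.post('/druid/indexer/v1/task', spec);
    return resp.data.task; // task id assigned by the Overlord
  }
  // Streaming specs (supervisors) would take a different path, omitted here.
  return;
}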