HDDS-2266. Avoid evaluation of LOG.trace and LOG.debug statement in the read/write path. (#1633)
This commit is contained in:
parent eeb58a07e2
commit a031388a2e
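The change applies the standard SLF4J guard pattern across the Ozone read/write path: any debug or trace call whose message is built by string concatenation, or whose arguments are computed only for logging, is wrapped in an isDebugEnabled()/isTraceEnabled() check, and concatenation is replaced with {} placeholders. A minimal sketch of the pattern follows; the class and method names are illustrative only and are not part of this commit:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class GuardedLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedLoggingSketch.class);

      private GuardedLoggingSketch() {
      }

      static void onBlockWritten(Object blockId, long length) {
        // Unguarded + concatenation: the message string (and any toString()
        // it triggers) is built even when DEBUG logging is disabled.
        LOG.debug("block written " + blockId + ", length " + length);

        // Guarded + parameterized: isDebugEnabled() is a cheap level check,
        // so nothing is built unless DEBUG is actually on.
        if (LOG.isDebugEnabled()) {
          LOG.debug("block written {}, length {}", blockId, length);
        }
      }
    }

Parameterized logging alone already defers message formatting, but the arguments themselves (and any getter chains used to produce them) are still evaluated before the call, which is why the explicit guard is what keeps the hot path cheap when the level is off.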
@@ -41,8 +41,7 @@ import java.util.stream.Collectors;
  */
 public final class Pipeline {
 
-  private static final Logger LOG = LoggerFactory
-      .getLogger(Pipeline.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class);
   private final PipelineID id;
   private final ReplicationType type;
   private final ReplicationFactor factor;
@@ -193,10 +193,12 @@ public class BlockOutputStreamEntryPool {
              .setPipeline(streamEntry.getPipeline()).build();
       locationInfoList.add(info);
     }
-    LOG.debug(
-        "block written " + streamEntry.getBlockID() + ", length " + length
-            + " bcsID " + streamEntry.getBlockID()
-            .getBlockCommitSequenceId());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(
+          "block written " + streamEntry.getBlockID() + ", length " + length
+              + " bcsID " + streamEntry.getBlockID()
+              .getBlockCommitSequenceId());
+    }
     return locationInfoList;
   }
@@ -97,8 +97,10 @@ public class KeyInputStream extends InputStream implements Seekable {
     long keyLength = 0;
     for (int i = 0; i < blockInfos.size(); i++) {
       OmKeyLocationInfo omKeyLocationInfo = blockInfos.get(i);
-      LOG.debug("Adding stream for accessing {}. The stream will be " +
-          "initialized later.", omKeyLocationInfo);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding stream for accessing {}. The stream will be " +
+            "initialized later.", omKeyLocationInfo);
+      }
 
       addStream(omKeyLocationInfo, xceiverClientManager,
           verifyChecksum);
@@ -439,10 +439,14 @@ public class RpcClient implements ClientProtocol {
         ozoneManagerClient.getDelegationToken(renewer);
     if (token != null) {
       token.setService(dtService);
-      LOG.debug("Created token {} for dtService {}", token, dtService);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Created token {} for dtService {}", token, dtService);
+      }
     } else {
-      LOG.debug("Cannot get ozone delegation token for renewer {} to access " +
-          "service {}", renewer, dtService);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Cannot get ozone delegation token for renewer {} to " +
+            "access service {}", renewer, dtService);
+      }
     }
     return token;
   }
@@ -75,7 +75,9 @@ public class S3SecretManagerImpl implements S3SecretManager {
     } finally {
       omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID);
     }
-    LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result);
+    }
     return result;
   }
 
@@ -214,8 +214,10 @@ public class OMFailoverProxyProvider implements
   @Override
   public void performFailover(OzoneManagerProtocolPB currentProxy) {
     int newProxyIndex = incrementProxyIndex();
-    LOG.debug("Failing over OM proxy to index: {}, nodeId: {}",
-        newProxyIndex, omNodeIDList.get(newProxyIndex));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Failing over OM proxy to index: {}, nodeId: {}",
+          newProxyIndex, omNodeIDList.get(newProxyIndex));
+    }
   }
 
   /**
@@ -61,7 +61,9 @@ public final class OMRatisHelper {
    */
  public static RaftClient newRaftClient(RpcType rpcType, String omId, RaftGroup
      group, RetryPolicy retryPolicy, Configuration conf) {
-    LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group);
+    }
     final RaftProperties properties = new RaftProperties();
     RaftConfigKeys.Rpc.setType(properties, rpcType);
 
@@ -168,8 +168,10 @@ public class OzoneManagerLock {
       throw new RuntimeException(errorMessage);
     } else {
       lockFn.accept(resourceName);
-      LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name,
-          resourceName);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name,
+            resourceName);
+      }
       lockSet.set(resource.setLock(lockSet.get()));
       return true;
     }
@@ -264,8 +266,10 @@ public class OzoneManagerLock {
         throw ex;
       }
     }
-    LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name,
-        firstUser, secondUser);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name,
+          firstUser, secondUser);
+    }
     lockSet.set(resource.setLock(lockSet.get()));
     return true;
   }
@@ -300,8 +304,10 @@ public class OzoneManagerLock {
       manager.writeUnlock(firstUser);
       manager.writeUnlock(secondUser);
     }
-    LOG.debug("Release Write {} lock on resource {} and {}", resource.name,
-        firstUser, secondUser);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Release Write {} lock on resource {} and {}", resource.name,
+          firstUser, secondUser);
+    }
     lockSet.set(resource.clearLock(lockSet.get()));
   }
 
@@ -352,8 +358,10 @@ public class OzoneManagerLock {
     // locks, as some locks support acquiring lock again.
     lockFn.accept(resourceName);
     // clear lock
-    LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name,
-        resourceName);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name,
+          resourceName);
+    }
     lockSet.set(resource.clearLock(lockSet.get()));
   }
 
@@ -89,7 +89,7 @@ public class OzoneBlockTokenSecretManager extends
     if (LOG.isTraceEnabled()) {
       long expiryTime = tokenIdentifier.getExpiryDate();
       String tokenId = tokenIdentifier.toString();
-      LOG.trace("Issued delegation token -> expiryTime:{},tokenId:{}",
+      LOG.trace("Issued delegation token -> expiryTime:{}, tokenId:{}",
           expiryTime, tokenId);
     }
     // Pass blockId as service.
@@ -289,8 +289,10 @@ public class OzoneDelegationTokenSecretManager
       String canceller) throws IOException {
     OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf(
         token.getIdentifier());
-    LOG.debug("Token cancellation requested for identifier: {}",
-        formatTokenId(id));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Token cancellation requested for identifier: {}",
+          formatTokenId(id));
+    }
 
     if (id.getUser() == null) {
       throw new InvalidToken("Token with no owner " + formatTokenId(id));
@@ -43,9 +43,13 @@ public class OzoneDelegationTokenSelector
   @Override
   public Token<OzoneTokenIdentifier> selectToken(Text service,
       Collection<Token<? extends TokenIdentifier>> tokens) {
-    LOG.trace("Getting token for service {}", service);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Getting token for service {}", service);
+    }
     Token token = getSelectedTokens(service, tokens);
-    LOG.debug("Got tokens: {} for service {}", token, service);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Got tokens: {} for service {}", token, service);
+    }
     return token;
   }
 
@@ -110,8 +110,10 @@ public abstract class OzoneSecretManager<T extends TokenIdentifier>
 
   @Override
   public byte[] createPassword(T identifier) {
-    logger.debug("Creating password for identifier: {}, currentKey: {}",
-        formatTokenId(identifier), currentKey.getKeyId());
+    if (logger.isDebugEnabled()) {
+      logger.debug("Creating password for identifier: {}, currentKey: {}",
+          formatTokenId(identifier), currentKey.getKeyId());
+    }
     byte[] password = null;
     try {
       password = createPassword(identifier.getBytes(),
@@ -570,8 +570,10 @@ public class BucketManagerImpl implements BucketManager {
       }
       boolean hasAccess = OzoneAclUtil.checkAclRights(bucketInfo.getAcls(),
           context);
-      LOG.debug("user:{} has access rights for bucket:{} :{} ",
-          context.getClientUgi(), ozObject.getBucketName(), hasAccess);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("user:{} has access rights for bucket:{} :{} ",
+            context.getClientUgi(), ozObject.getBucketName(), hasAccess);
+      }
       return hasAccess;
     } catch (IOException ex) {
       if(ex instanceof OMException) {
@@ -1661,8 +1661,10 @@ public class KeyManagerImpl implements KeyManager {
       if (keyInfo == null) {
         // the key does not exist, but it is a parent "dir" of some key
         // let access be determined based on volume/bucket/prefix ACL
-        LOG.debug("key:{} is non-existent parent, permit access to user:{}",
-            keyName, context.getClientUgi());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("key:{} is non-existent parent, permit access to user:{}",
+              keyName, context.getClientUgi());
+        }
         return true;
       }
     } catch (OMException e) {
@@ -1678,8 +1680,10 @@ public class KeyManagerImpl implements KeyManager {
 
       boolean hasAccess = OzoneAclUtil.checkAclRight(
           keyInfo.getAcls(), context);
-      LOG.debug("user:{} has access rights for key:{} :{} ",
-          context.getClientUgi(), ozObject.getKeyName(), hasAccess);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("user:{} has access rights for key:{} :{} ",
+            context.getClientUgi(), ozObject.getKeyName(), hasAccess);
+      }
       return hasAccess;
     } catch (IOException ex) {
       if(ex instanceof OMException) {
@@ -1766,10 +1770,11 @@ public class KeyManagerImpl implements KeyManager {
     if (keys.iterator().hasNext()) {
       return new OzoneFileStatus(keyName);
     }
-
-    LOG.debug("Unable to get file status for the key: volume:" + volumeName +
-        " bucket:" + bucketName + " key:" + keyName + " with error no " +
-        "such file exists:");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Unable to get file status for the key: volume: {}, bucket:" +
+          " {}, key: {}, with error: No such file exists.", volumeName,
+          bucketName, keyName);
+    }
     throw new OMException("Unable to get file status: volume: " +
         volumeName + " bucket: " + bucketName + " key: " + keyName,
         FILE_NOT_FOUND);
@@ -2132,8 +2137,10 @@ public class KeyManagerImpl implements KeyManager {
         List<DatanodeDetails> sortedNodes = scmClient.getBlockClient()
             .sortDatanodes(nodeList, clientMachine);
         k.getPipeline().setNodesInOrder(sortedNodes);
-        LOG.debug("Sort datanodes {} for client {}, return {}", nodes,
-            clientMachine, sortedNodes);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Sort datanodes {} for client {}, return {}", nodes,
+              clientMachine, sortedNodes);
+        }
       } catch (IOException e) {
         LOG.warn("Unable to sort datanodes based on distance to " +
             "client, volume=" + keyInfo.getVolumeName() +
@@ -88,7 +88,9 @@ public class OpenKeyCleanupService extends BackgroundService {
       if (result.isSuccess()) {
         try {
           keyManager.deleteExpiredOpenKey(result.getObjectKey());
-          LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
+          }
           deletedSize += 1;
         } catch (IOException e) {
           LOG.warn("Failed to delete hanging-open key {}",
@@ -734,10 +734,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
 
     if (SecurityUtil.getAuthenticationMethod(conf).equals(
         AuthenticationMethod.KERBEROS)) {
-      LOG.debug("Ozone security is enabled. Attempting login for OM user. "
-          + "Principal: {},keytab: {}", conf.get(
-          OZONE_OM_KERBEROS_PRINCIPAL_KEY),
-          conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Ozone security is enabled. Attempting login for OM user. "
+            + "Principal: {}, keytab: {}", conf.get(
+            OZONE_OM_KERBEROS_PRINCIPAL_KEY),
+            conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
+      }
 
       UserGroupInformation.setConfiguration(conf);
 
@@ -139,7 +139,10 @@ public class PrefixManagerImpl implements PrefixManager {
       OMPrefixAclOpResult omPrefixAclOpResult = removeAcl(obj, acl, prefixInfo);
 
       if (!omPrefixAclOpResult.isOperationsResult()) {
-        LOG.debug("acl {} does not exist for prefix path {} ", acl, prefixPath);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("acl {} does not exist for prefix path {} ",
+              acl, prefixPath);
+        }
         return false;
       }
 
@@ -236,8 +239,10 @@ public class PrefixManagerImpl implements PrefixManager {
     if (lastNode != null && lastNode.getValue() != null) {
       boolean hasAccess = OzoneAclUtil.checkAclRights(lastNode.getValue().
           getAcls(), context);
-      LOG.debug("user:{} has access rights for ozObj:{} ::{} ",
-          context.getClientUgi(), ozObject, hasAccess);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("user:{} has access rights for ozObj:{} ::{} ",
+            context.getClientUgi(), ozObject, hasAccess);
+      }
       return hasAccess;
     } else {
       return true;
@@ -108,7 +108,7 @@ public class VolumeManagerImpl implements VolumeManager {
       if (volumeList != null) {
         prevVolList.addAll(volumeList.getVolumeNamesList());
       } else {
-        LOG.debug("volume:{} not found for user:{}");
+        LOG.debug("volume:{} not found for user:{}", volume, owner);
         throw new OMException(ResultCodes.USER_NOT_FOUND);
       }
 
@@ -503,7 +503,9 @@ public class VolumeManagerImpl implements VolumeManager {
       try {
         volumeArgs.addAcl(acl);
       } catch (OMException ex) {
-        LOG.debug("Add acl failed.", ex);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Add acl failed.", ex);
+        }
         return false;
       }
       metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
@@ -553,7 +555,9 @@ public class VolumeManagerImpl implements VolumeManager {
       try {
         volumeArgs.removeAcl(acl);
       } catch (OMException ex) {
-        LOG.debug("Remove acl failed.", ex);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Remove acl failed.", ex);
+        }
         return false;
       }
       metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
@@ -685,8 +689,10 @@ public class VolumeManagerImpl implements VolumeManager {
       Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
       boolean hasAccess = volumeArgs.getAclMap().hasAccess(
           context.getAclRights(), context.getClientUgi());
-      LOG.debug("user:{} has access rights for volume:{} :{} ",
-          context.getClientUgi(), ozObject.getVolumeName(), hasAccess);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("user:{} has access rights for volume:{} :{} ",
+            context.getClientUgi(), ozObject.getVolumeName(), hasAccess);
+      }
       return hasAccess;
     } catch (IOException ex) {
       LOG.error("Check access operation failed for volume:{}", volume, ex);
@@ -148,9 +148,11 @@ public class OzoneManagerDoubleBuffer {
         flushedTransactionCount.addAndGet(flushedTransactionsSize);
         flushIterations.incrementAndGet();
 
-        LOG.debug("Sync Iteration {} flushed transactions in this " +
-            "iteration{}", flushIterations.get(),
-            flushedTransactionsSize);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Sync Iteration {} flushed transactions in this " +
+              "iteration{}", flushIterations.get(),
+              flushedTransactionsSize);
+        }
 
         long lastRatisTransactionIndex =
             readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex)
@@ -99,8 +99,10 @@ public final class OzoneManagerRatisClient implements Closeable {
   }
 
   public void connect() {
-    LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}",
-        raftGroup.getGroupId().getUuid().toString(), omNodeID);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}",
+          raftGroup.getGroupId().getUuid().toString(), omNodeID);
+    }
 
     // TODO : XceiverClient ratis should pass the config value of
     // maxOutstandingRequests so as to set the upper bound on max no of async
@@ -147,8 +149,7 @@ public final class OzoneManagerRatisClient implements Closeable {
     if (message.contains(STATUS_CODE)) {
       String errorCode = message.substring(message.indexOf(STATUS_CODE) +
          STATUS_CODE.length());
-      LOG.debug("Parsing error message for error code " +
-          errorCode);
+      LOG.debug("Parsing error message for error code {}", errorCode);
       return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim());
     } else {
       return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR;
@@ -166,25 +167,27 @@ public final class OzoneManagerRatisClient implements Closeable {
     CompletableFuture<RaftClientReply> raftClientReply =
         sendRequestAsync(request);
 
-    return raftClientReply.whenComplete((reply, e) -> LOG.debug(
-        "received reply {} for request: cmdType={} traceID={} " +
-            "exception: {}", reply, request.getCmdType(),
-        request.getTraceID(), e))
-        .thenApply(reply -> {
-          try {
-            Preconditions.checkNotNull(reply);
-            if (!reply.isSuccess()) {
-              RaftException exception = reply.getException();
-              Preconditions.checkNotNull(exception, "Raft reply failure " +
-                  "but no exception propagated.");
-              throw new CompletionException(exception);
-            }
-            return OMRatisHelper.getOMResponseFromRaftClientReply(reply);
+    return raftClientReply.whenComplete((reply, e) -> {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("received reply {} for request: cmdType={} traceID={} " +
+            "exception: {}", reply, request.getCmdType(),
+            request.getTraceID(), e);
+      }
+    }).thenApply(reply -> {
+      try {
+        Preconditions.checkNotNull(reply);
+        if (!reply.isSuccess()) {
+          RaftException exception = reply.getException();
+          Preconditions.checkNotNull(exception, "Raft reply failure " +
+              "but no exception propagated.");
+          throw new CompletionException(exception);
+        }
+        return OMRatisHelper.getOMResponseFromRaftClientReply(reply);
 
-          } catch (InvalidProtocolBufferException e) {
-            throw new CompletionException(e);
-          }
-        });
+      } catch (InvalidProtocolBufferException e) {
+        throw new CompletionException(e);
+      }
+    });
   }
 
   /**
@@ -198,7 +201,9 @@ public final class OzoneManagerRatisClient implements Closeable {
       OMRequest request) {
     boolean isReadOnlyRequest = OmUtils.isReadOnly(request);
     ByteString byteString = OMRatisHelper.convertRequestToByteString(request);
-    LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request);
+    }
     return isReadOnlyRequest ? raftClient.sendReadOnlyAsync(() -> byteString) :
         raftClient.sendAsync(() -> byteString);
   }
@@ -169,8 +169,10 @@ public final class OzoneManagerRatisServer {
       omResponse.setMessage(stateMachineException.getCause().getMessage());
       omResponse.setStatus(parseErrorStatus(
           stateMachineException.getCause().getMessage()));
-      LOG.debug("Error while executing ratis request. " +
-          "stateMachineException: ", stateMachineException);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error while executing ratis request. " +
+            "stateMachineException: ", stateMachineException);
+      }
       return omResponse.build();
     }
 
@@ -103,7 +103,9 @@ public class OMBucketSetAclRequest extends OMBucketAclRequest {
   void onComplete(boolean operationResult, IOException exception,
       OMMetrics omMetrics) {
     if (operationResult) {
-      LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath());
+      }
     } else {
       omMetrics.incNumBucketUpdateFails();
       if (exception == null) {
@@ -96,8 +96,10 @@ public class OMVolumeSetAclRequest extends OMVolumeAclRequest {
   @Override
   void onComplete(IOException ex) {
     if (ex == null) {
-      LOG.debug("Set acls: {} to volume: {} success!",
-          getAcls(), getVolumeName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set acls: {} to volume: {} success!",
+            getAcls(), getVolumeName());
+      }
     } else {
       LOG.error("Set acls {} to volume {} failed!",
           getAcls(), getVolumeName(), ex);
@@ -48,7 +48,9 @@ public class OzoneManagerHARequestHandlerImpl
   @Override
   public OMResponse handleApplyTransaction(OMRequest omRequest,
       long transactionLogIndex) {
-    LOG.debug("Received OMRequest: {}, ", omRequest);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Received OMRequest: {}, ", omRequest);
+    }
     Type cmdType = omRequest.getCmdType();
     switch (cmdType) {
     case CreateVolume:
@@ -225,7 +225,9 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
       }
       try {
         omClientResponse.getFlushFuture().get();
-        LOG.trace("Future for {} is completed", request);
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Future for {} is completed", request);
+        }
       } catch (ExecutionException | InterruptedException ex) {
         // terminate OM. As if we are in this stage means, while getting
         // response from flush future, we got an exception.
@@ -149,7 +149,9 @@ public class OzoneManagerRequestHandler implements RequestHandler {
   @SuppressWarnings("methodlength")
   @Override
   public OMResponse handle(OMRequest request) {
-    LOG.debug("Received OMRequest: {}, ", request);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Received OMRequest: {}, ", request);
+    }
     Type cmdType = request.getCmdType();
     OMResponse.Builder responseBuilder = OMResponse.newBuilder()
         .setCmdType(cmdType)
@@ -79,20 +79,20 @@ public class OzoneNativeAuthorizer implements IAccessAuthorizer {
 
     switch (objInfo.getResourceType()) {
     case VOLUME:
-      LOG.trace("Checking access for volume:" + objInfo);
+      LOG.trace("Checking access for volume: {}", objInfo);
       return volumeManager.checkAccess(objInfo, context);
     case BUCKET:
-      LOG.trace("Checking access for bucket:" + objInfo);
+      LOG.trace("Checking access for bucket: {}", objInfo);
       return (bucketManager.checkAccess(objInfo, context)
           && volumeManager.checkAccess(objInfo, context));
     case KEY:
-      LOG.trace("Checking access for Key:" + objInfo);
+      LOG.trace("Checking access for Key: {}", objInfo);
       return (keyManager.checkAccess(objInfo, context)
          && prefixManager.checkAccess(objInfo, context)
          && bucketManager.checkAccess(objInfo, context)
          && volumeManager.checkAccess(objInfo, context));
     case PREFIX:
-      LOG.trace("Checking access for Prefix:" + objInfo);
+      LOG.trace("Checking access for Prefix: {}", objInfo);
       return (prefixManager.checkAccess(objInfo, context)
          && bucketManager.checkAccess(objInfo, context)
          && volumeManager.checkAccess(objInfo, context));
@@ -425,7 +425,9 @@ public class BasicOzoneFileSystem extends FileSystem {
       DeleteIterator iterator = new DeleteIterator(f, recursive);
       return iterator.iterate();
     } catch (FileNotFoundException e) {
-      LOG.debug("Couldn't delete {} - does not exist", f);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Couldn't delete {} - does not exist", f);
+      }
       return false;
     }
   }
@@ -110,10 +110,14 @@ public class AWSV4AuthParser implements AWSAuthParser {
 
     canonicalRequest = buildCanonicalRequest();
     strToSign.append(hash(canonicalRequest));
-    LOG.debug("canonicalRequest:[{}]", canonicalRequest);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("canonicalRequest:[{}]", canonicalRequest);
+    }
 
-    headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k,
-        headerMap.get(k)));
+    if (LOG.isTraceEnabled()) {
+      headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k,
+          headerMap.get(k)));
+    }
 
     LOG.debug("StringToSign:[{}]", strToSign);
     stringToSign = strToSign.toString();
@@ -86,8 +86,9 @@ public class OzoneClientProducer {
       identifier.setSignature(v4RequestParser.getSignature());
       identifier.setAwsAccessId(v4RequestParser.getAwsAccessId());
       identifier.setOwner(new Text(v4RequestParser.getAwsAccessId()));
-
-      LOG.trace("Adding token for service:{}", omService);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Adding token for service:{}", omService);
+      }
       Token<OzoneTokenIdentifier> token = new Token(identifier.getBytes(),
          identifier.getSignature().getBytes(UTF_8),
          identifier.getKind(),
@@ -42,7 +42,9 @@ public class OS3ExceptionMapper implements ExceptionMapper<OS3Exception> {
 
   @Override
   public Response toResponse(OS3Exception exception) {
-    LOG.debug("Returning exception. ex: {}", exception.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Returning exception. ex: {}", exception.toString());
+    }
     exception.setRequestId(requestIdentifier.getRequestId());
     return Response.status(exception.getHttpCode())
         .entity(exception.toXml()).build();