An upgraded FindBugs/SpotBugs run flags these SLF4J and style issues; fix them up.
Change-Id: Id5a008cddc6616c3a93f0528efca00b86843fc3c
Signed-off-by: Robert Varga <robert.varga@pantheon.tech>
package org.opendaylight.controller.blueprint.ext;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
* @author Thomas Pantelis
*/
abstract class AbstractDependentComponentFactoryMetadata implements DependentComponentFactoryMetadata {
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
final Logger log = LoggerFactory.getLogger(getClass());
private final String id;
private final AtomicBoolean started = new AtomicBoolean();
+ " can only be used on the root <blueprint> element");
}
- LOG.debug("{}: {}", propertyName, attr.getValue());
+ LOG.debug("Property {} = {}", propertyName, attr.getValue());
if (!Boolean.parseBoolean(attr.getValue())) {
return component;
LOG.debug("{}: In untrack {}", getName(), reference);
if (trackedServiceReference == reference) {
- LOG.debug("{}: Current reference has been untracked", getName(), trackedServiceReference);
+ LOG.debug("{}: Current reference {} has been untracked", getName(), trackedServiceReference);
}
}
TimeUnit.NANOSECONDS.sleep(delay);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- LOG.debug("Interrupted after sleeping {}ns", e, currentTime() - now);
+ LOG.debug("Interrupted after sleeping {}ns", currentTime() - now, e);
}
}
try {
return cookie.equals(extractCookie(id));
} catch (IllegalArgumentException e) {
- LOG.debug("extractCookie failed while cancelling slicing for cookie {}: {}", cookie, e);
+ LOG.debug("extractCookie failed while cancelling slicing for cookie {}", cookie, e);
return false;
}
});
package org.opendaylight.controller.cluster.access.client;
import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Forwarder class responsible for routing requests from the previous connection incarnation back to the originator,
* @author Robert Varga
*/
public abstract class ReconnectForwarder {
- static final Logger LOG = LoggerFactory.getLogger(ReconnectForwarder.class);
// Visible for subclass method handle
private final AbstractReceivingClientConnection<?> successor;
import static java.util.Objects.requireNonNull;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
*
*/
public class TracingBroker implements TracingDOMDataBroker {
-
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
static final Logger LOG = LoggerFactory.getLogger(TracingBroker.class);
private static final int STACK_TRACE_FIRST_RELEVANT_FRAME = 2;
@Override
public void onFailure(final Throwable ex) {
- LOG.error("Can not put data into datastore [store: {}] [path: {}] [exception: {}]",store,path, ex);
+ LOG.error("Can not put data into datastore [store: {}] [path: {}]", store, path, ex);
}
}, MoreExecutors.directExecutor());
}
@Override
public void onFailure(final Throwable ex) {
- LOG.error("Can not delete data from datastore [store: {}] [path: {}] [exception: {}]",store,path, ex);
+ LOG.error("Can not delete data from datastore [store: {}] [path: {}]", store, path, ex);
}
}, MoreExecutors.directExecutor());
}
// non-leader cannot satisfy leadership request
LOG.warn("{}: onRequestLeadership {} was sent to non-leader."
+ " Current behavior: {}. Sending failure response",
- persistenceId(), getCurrentBehavior().state());
+ persistenceId(), message, getCurrentBehavior().state());
message.getReplyTo().tell(new LeadershipTransferFailedException("Cannot transfer leader to "
+ message.getRequestedFollowerId()
+ ". RequestLeadership message was sent to non-leader " + persistenceId()), getSelf());
cluster = Optional.of(Cluster.get(getActorSystem()));
} catch (Exception e) {
// An exception means there's no cluster configured. This will only happen in unit tests.
- log.debug("{}: Could not obtain Cluster: {}", getId(), e);
+ log.debug("{}: Could not obtain Cluster", getId(), e);
cluster = Optional.empty();
}
}
this.log = context.getLogger();
}
- boolean handleRecoveryMessage(Object message, PersistentDataProvider persistentProvider) {
+ boolean handleRecoveryMessage(final Object message, final PersistentDataProvider persistentProvider) {
log.trace("{}: handleRecoveryMessage: {}", context.getId(), message);
anyDataRecovered = anyDataRecovered || !(message instanceof RecoveryCompleted);
}
}
- private void onRecoveredSnapshot(SnapshotOffer offer) {
+ private void onRecoveredSnapshot(final SnapshotOffer offer) {
log.debug("{}: SnapshotOffer called.", context.getId());
initRecoveryTimer();
replicatedLog().getSnapshotTerm(), replicatedLog().size());
}
- private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+ private void onRecoveredJournalLogEntry(final ReplicatedLogEntry logEntry) {
if (log.isDebugEnabled()) {
log.debug("{}: Received ReplicatedLogEntry for recovery: index: {}, size: {}", context.getId(),
logEntry.getIndex(), logEntry.size());
}
}
- private void onRecoveredApplyLogEntries(long toIndex) {
+ private void onRecoveredApplyLogEntries(final long toIndex) {
if (!context.getPersistenceProvider().isRecoveryApplicable()) {
dataRecoveredWithPersistenceDisabled = true;
return;
context.setCommitIndex(lastApplied);
}
- private void onDeleteEntries(DeleteEntries deleteEntries) {
+ private void onDeleteEntries(final DeleteEntries deleteEntries) {
if (context.getPersistenceProvider().isRecoveryApplicable()) {
replicatedLog().removeFrom(deleteEntries.getFromIndex());
} else {
}
}
- private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+ private void batchRecoveredLogEntry(final ReplicatedLogEntry logEntry) {
initRecoveryTimer();
int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
currentRecoveryBatchCount = 0;
}
- private void onRecoveryCompletedMessage(PersistentDataProvider persistentProvider) {
+ private void onRecoveryCompletedMessage(final PersistentDataProvider persistentProvider) {
if (currentRecoveryBatchCount > 0) {
endCurrentLogRecoveryBatch();
}
recoveryTimer = null;
}
- log.info("Recovery completed" + recoveryTime + " - Switching actor to Follower - " + "Persistence Id = "
- + context.getId() + " Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, "
- + "journal-size = {}", replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
+ log.info("Recovery completed {} - Switching actor to Follower - Persistence Id = {}"
+ + " Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, journal-size = {}",
+ recoveryTime, context.getId(), replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
replicatedLog().getSnapshotTerm(), replicatedLog().size());
if (dataRecoveredWithPersistenceDisabled
}
}
- private static boolean isServerConfigurationPayload(ReplicatedLogEntry repLogEntry) {
+ private static boolean isServerConfigurationPayload(final ReplicatedLogEntry repLogEntry) {
return repLogEntry.getData() instanceof ServerConfigurationPayload;
}
- private static boolean isPersistentPayload(ReplicatedLogEntry repLogEntry) {
+ private static boolean isPersistentPayload(final ReplicatedLogEntry repLogEntry) {
return repLogEntry.getData() instanceof PersistentPayload;
}
- private static boolean isMigratedPayload(ReplicatedLogEntry repLogEntry) {
+ private static boolean isMigratedPayload(final ReplicatedLogEntry repLogEntry) {
return isMigratedSerializable(repLogEntry.getData());
}
- private static boolean isMigratedSerializable(Object message) {
+ private static boolean isMigratedSerializable(final Object message) {
return message instanceof MigratedSerializable && ((MigratedSerializable)message).isMigrated();
}
}
import akka.actor.ActorRef;
import akka.actor.Cancellable;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
/**
* Used for message logging.
*/
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
protected final Logger log;
/**
}
@Override
- public void setReplicatedToAllIndex(long replicatedToAllIndex) {
+ public void setReplicatedToAllIndex(final long replicatedToAllIndex) {
this.replicatedToAllIndex = replicatedToAllIndex;
}
* @param appendEntries the message
* @return a new behavior if it was changed or the current behavior
*/
- protected RaftActorBehavior appendEntries(ActorRef sender, AppendEntries appendEntries) {
+ protected RaftActorBehavior appendEntries(final ActorRef sender, final AppendEntries appendEntries) {
// 1. Reply false if term < currentTerm (§5.1)
if (appendEntries.getTerm() < currentTerm()) {
* @param requestVote the message
* @return a new behavior if it was changed or the current behavior
*/
- protected RaftActorBehavior requestVote(ActorRef sender, RequestVote requestVote) {
+ protected RaftActorBehavior requestVote(final ActorRef sender, final RequestVote requestVote) {
log.debug("{}: In requestVote: {} - currentTerm: {}, votedFor: {}, lastIndex: {}, lastTerm: {}", logName(),
requestVote, currentTerm(), votedFor(), lastIndex(), lastTerm());
return this;
}
- protected boolean canGrantVote(RequestVote requestVote) {
+ protected boolean canGrantVote(final RequestVote requestVote) {
boolean grantVote = false;
// Reply false if term < currentTerm (§5.1)
*
* @param interval the duration after which we should trigger a new election
*/
- protected void scheduleElection(FiniteDuration interval) {
+ protected void scheduleElection(final FiniteDuration interval) {
stopElection();
// Schedule an election. When the scheduler triggers an ElectionTimeout message is sent to itself
* @param logIndex the log index
* @return the ClientRequestTracker or null if none available
*/
- protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+ protected ClientRequestTracker removeClientRequestTracker(final long logIndex) {
return null;
}
*
* @return the log entry index or -1 if not found
*/
- protected long getLogEntryIndex(long index) {
+ protected long getLogEntryIndex(final long index) {
if (index == context.getReplicatedLog().getSnapshotIndex()) {
return context.getReplicatedLog().getSnapshotIndex();
}
*
* @return the log entry term or -1 if not found
*/
- protected long getLogEntryTerm(long index) {
+ protected long getLogEntryTerm(final long index) {
if (index == context.getReplicatedLog().getSnapshotIndex()) {
return context.getReplicatedLog().getSnapshotTerm();
}
}
@Override
- public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
+ public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
if (message instanceof AppendEntries) {
return appendEntries(sender, (AppendEntries) message);
} else if (message instanceof AppendEntriesReply) {
}
@Override
- public RaftActorBehavior switchBehavior(RaftActorBehavior behavior) {
+ public RaftActorBehavior switchBehavior(final RaftActorBehavior behavior) {
return internalSwitchBehavior(behavior);
}
- protected RaftActorBehavior internalSwitchBehavior(RaftState newState) {
+ protected RaftActorBehavior internalSwitchBehavior(final RaftState newState) {
return internalSwitchBehavior(createBehavior(context, newState));
}
@SuppressWarnings("checkstyle:IllegalCatch")
- protected RaftActorBehavior internalSwitchBehavior(RaftActorBehavior newBehavior) {
+ protected RaftActorBehavior internalSwitchBehavior(final RaftActorBehavior newBehavior) {
if (!context.getRaftPolicy().automaticElectionsEnabled()) {
return this;
}
}
- protected int getMajorityVoteCount(int numPeers) {
+ protected int getMajorityVoteCount(final int numPeers) {
// Votes are required from a majority of the peers including self.
// The numMajority field therefore stores a calculated value
// of the number of votes required for this candidate to win an
LOG.debug("{}: Lagging {} entries behind leader {}", id, lag, leaderId);
changeSyncStatus(NOT_IN_SYNC, false);
} else if (commitIndex >= syncTarget.minimumCommitIndex) {
- LOG.debug("{}: Lagging {} entries behind leader and reached {} (of expected {})", id, lag, leaderId,
+ LOG.debug("{}: Lagging {} entries behind leader {} and reached {} (of expected {})", id, lag, leaderId,
commitIndex, syncTarget.minimumCommitIndex);
changeSyncStatus(IN_SYNC, false);
}
private final NotificationPublishService notificationPublishService;
- public HeliumNotificationProviderServiceAdapter(NotificationPublishService notificationPublishService,
- NotificationService notificationService) {
+ public HeliumNotificationProviderServiceAdapter(final NotificationPublishService notificationPublishService,
+ final NotificationService notificationService) {
super(notificationService);
this.notificationPublishService = notificationPublishService;
}
try {
notificationPublishService.putNotification(notification);
} catch (InterruptedException e) {
- LOG.error("Notification publication was interupted: " + e);
+ LOG.error("Notification publication was interupted", e);
}
}
try {
notificationPublishService.putNotification(notification);
} catch (InterruptedException e) {
- LOG.error("Notification publication was interupted: " + e);
+ LOG.error("Notification publication was interupted", e);
}
}
@Override
public ListenerRegistration<NotificationInterestListener> registerInterestListener(
- NotificationInterestListener interestListener) {
+ final NotificationInterestListener interestListener) {
throw new UnsupportedOperationException("InterestListener is not supported.");
}
@Override
- public void close() throws Exception {
+ public void close() {
+
}
}
try {
notifyListener(listenerRef, baEvent);
} catch (RuntimeException e) {
- LOG.warn("Unhandled exception during invoking listener {}", e, listenerRef);
+ LOG.warn("Unhandled exception during invoking listener {}", listenerRef, e);
}
}
}
};
}
- private void notifyListener(final NotificationInterestListener listener,
+ private static void notifyListener(final NotificationInterestListener listener,
final Set<Class<? extends Notification>> baEvent) {
for (final Class<? extends Notification> event: baEvent) {
listener.onNotificationSubscribtion(event);
}
@Override
- public void close() throws Exception {
+ public void close() {
super.close();
domListener.close();
}
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
final class BindingDOMMountPointListenerAdapter<T extends MountPointListener>
implements ListenerRegistration<T>, DOMMountPointListener {
+ private static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointListenerAdapter.class);
private final T listener;
private final ListenerRegistration<DOMMountPointListener> registration;
final InstanceIdentifier<? extends DataObject> bindingPath = toBinding(path);
listener.onMountPointCreated(bindingPath);
} catch (final DeserializationException e) {
- BindingDOMMountPointServiceAdapter.LOG.error("Unable to translate mountPoint path {}. Omitting event.",
- path, e);
+ LOG.error("Unable to translate mountPoint path {}. Omitting event.", path, e);
}
}
final InstanceIdentifier<? extends DataObject> bindingPath = toBinding(path);
listener.onMountPointRemoved(bindingPath);
} catch (final DeserializationException e) {
- BindingDOMMountPointServiceAdapter.LOG.error("Unable to translate mountPoint path {}. Omitting event.",
- path, e);
+ LOG.error("Unable to translate mountPoint path {}. Omitting event.", path, e);
}
}
}
import org.slf4j.LoggerFactory;
public class BindingDOMMountPointServiceAdapter implements MountPointService {
- public static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointServiceAdapter.class);
+ private static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointServiceAdapter.class);
private final BindingToNormalizedNodeCodec codec;
private final DOMMountPointService mountService;
private final LoadingCache<DOMMountPoint, BindingMountPointAdapter> bindingMountpoints = CacheBuilder.newBuilder()
.weakKeys().build(new CacheLoader<DOMMountPoint, BindingMountPointAdapter>() {
-
@Override
- public BindingMountPointAdapter load(DOMMountPoint key) {
- return new BindingMountPointAdapter(codec,key);
+ public BindingMountPointAdapter load(final DOMMountPoint key) {
+ return new BindingMountPointAdapter(codec, key);
}
});
- public BindingDOMMountPointServiceAdapter(DOMMountPointService mountService,BindingToNormalizedNodeCodec codec) {
+ public BindingDOMMountPointServiceAdapter(final DOMMountPointService mountService,
+ final BindingToNormalizedNodeCodec codec) {
this.codec = codec;
this.mountService = mountService;
}
@Override
- public Optional<MountPoint> getMountPoint(InstanceIdentifier<?> mountPoint) {
+ public Optional<MountPoint> getMountPoint(final InstanceIdentifier<?> mountPoint) {
YangInstanceIdentifier domPath = codec.toYangInstanceIdentifierBlocking(mountPoint);
Optional<DOMMountPoint> domMount = mountService.getMountPoint(domPath);
}
@Override
- public <T extends MountPointListener> ListenerRegistration<T> registerListener(InstanceIdentifier<?> path,
- T listener) {
+ public <T extends MountPointListener> ListenerRegistration<T> registerListener(final InstanceIdentifier<?> path,
+ final T listener) {
return new BindingDOMMountPointListenerAdapter<>(listener, codec, mountService);
}
}
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.AbstractMap.SimpleEntry;
public void onComplete(final Throwable failure, final ActorRef actorRef) {
if (failure != null) {
LOG.warn("No local shard found for {} datastoreType {} - Cannot request leadership transfer to"
- + " local shard.", shardName, failure);
+ + " local shard.", shardName, dataStoreType, failure);
makeLeaderLocalAsk.failure(failure);
} else {
makeLeaderLocalAsk
onMessageFailure(String.format("Failed to back up datastore to file %s", fileName), returnFuture, failure);
}
+ @SuppressFBWarnings("SLF4J_SIGN_ONLY_FORMAT")
private static <T> void onMessageFailure(final String msg, final SettableFuture<RpcResult<T>> returnFuture,
final Throwable failure) {
- LOG.error(msg, failure);
+ LOG.error("{}", msg, failure);
returnFuture.set(ClusterAdminRpcService.<T>newFailedRpcResultBuilder(String.format("%s: %s", msg,
failure.getMessage())).build());
}
import akka.actor.ActorRef;
import akka.actor.UntypedActor;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.eclipse.jdt.annotation.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class AbstractUntypedActor extends UntypedActor implements ExecuteInSelfActor {
// The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
@SuppressWarnings("checkstyle:MemberName")
protected final Logger LOG = LoggerFactory.getLogger(getClass());
import akka.actor.ActorRef;
import akka.persistence.UntypedPersistentActor;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.eclipse.jdt.annotation.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor implements ExecuteInSelfActor {
// The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
@SuppressWarnings("checkstyle:MemberName")
protected final Logger LOG = LoggerFactory.getLogger(getClass());
public class ActorSystemProviderImpl implements ActorSystemProvider, AutoCloseable {
private static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
- static final Logger LOG = LoggerFactory.getLogger(ActorSystemProviderImpl.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ActorSystemProviderImpl.class);
+
private final ActorSystem actorSystem;
private final ListenerRegistry<ActorSystemProviderListener> listeners = new ListenerRegistry<>();
LOG.debug("Invalidating backend information {}", staleInfo);
flushCache(staleInfo.getShardName());
- LOG.trace("Invalidated cache %s", staleInfo);
+ LOG.trace("Invalidated cache {}", staleInfo);
backends.remove(cookie, existing);
}
synchronized (this) {
LOG.debug("Invalidating backend information {}", staleInfo);
flushCache(shardName);
- LOG.trace("Invalidated cache %s", staleInfo);
+ LOG.trace("Invalidated cache {}", staleInfo);
state = null;
}
}
* @author Robert Varga
*/
final class SingleClientHistory extends AbstractClientHistory {
- private static final Logger LOG = LoggerFactory.getLogger(AbstractClientHistory.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SingleClientHistory.class);
SingleClientHistory(final AbstractDataStoreClientBehavior client, final LocalHistoryIdentifier identifier) {
super(client, identifier);
import akka.actor.ActorContext;
import akka.actor.ActorRef;
import akka.actor.Props;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import javax.annotation.concurrent.NotThreadSafe;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
*/
@NotThreadSafe
abstract class AbstractShardDataTreeNotificationPublisherActorProxy implements ShardDataTreeNotificationPublisher {
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
protected final Logger log = LoggerFactory.getLogger(getClass());
private final ActorContext actorContext;
private final String logContext;
private ActorRef publisherActor;
- protected AbstractShardDataTreeNotificationPublisherActorProxy(ActorContext actorContext, String actorName,
- String logContext) {
+ protected AbstractShardDataTreeNotificationPublisherActorProxy(final ActorContext actorContext,
+ final String actorName, final String logContext) {
this.actorContext = actorContext;
this.actorName = actorName;
this.logContext = logContext;
}
@Override
- public void publishChanges(DataTreeCandidate candidate) {
+ public void publishChanges(final DataTreeCandidate candidate) {
publisherActor().tell(new ShardDataTreeNotificationPublisherActor.PublishNotifications(candidate),
ActorRef.noSender());
}
aggregateFuture.onComplete(new OnComplete<Iterable<Object>>() {
@Override
- public void onComplete(Throwable failure, Iterable<Object> results) {
+ public void onComplete(final Throwable failure, final Iterable<Object> results) {
callbackExecutor.execute(
() -> processResponses(failure, results, currentState, afterState, returnFuture));
}
// FB issues violation for passing null to CompletableFuture#complete but it is valid and necessary when the
// generic type is Void.
@SuppressFBWarnings("NP_NONNULL_PARAM_VIOLATION")
- private void processResponses(Throwable failure, Iterable<Object> results, State currentState, State afterState,
- CompletableFuture<Void> resultFuture) {
+ private void processResponses(final Throwable failure, final Iterable<Object> results,
+ final State currentState, final State afterState, final CompletableFuture<Void> resultFuture) {
if (failure != null) {
successfulFromPrevious = Collections.emptyList();
resultFuture.completeExceptionally(failure);
} else if (result instanceof Status.Failure) {
failed.add((Failure) result);
} else {
- LOG.warn("{}: unrecognized response {}, ignoring it", result);
+ LOG.warn("{}: unrecognized response {}, ignoring it", txId, result);
}
}
+ "cannot be registered", logContext(), shardName, getInstance(), registeredPath);
} else if (failure != null) {
LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} "
- + "cannot be registered: {}", logContext(), shardName, getInstance(), registeredPath,
+ + "cannot be registered", logContext(), shardName, getInstance(), registeredPath,
failure);
} else {
doRegistration(shard);
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
@GuardedBy("this")
private ActorRef cohortRegistry;
-
- DataTreeCohortRegistrationProxy(ActorContext actorContext, DOMDataTreeIdentifier subtree, C cohort) {
+ DataTreeCohortRegistrationProxy(final ActorContext actorContext, final DOMDataTreeIdentifier subtree,
+ final C cohort) {
super(cohort);
this.subtree = Preconditions.checkNotNull(subtree);
this.actorContext = Preconditions.checkNotNull(actorContext);
subtree.getRootIdentifier()).withDispatcher(actorContext.getNotificationDispatcherPath()));
}
-
- public void init(String shardName) {
+ public void init(final String shardName) {
// FIXME: Add late binding to shard.
Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
findFuture.onComplete(new OnComplete<ActorRef>() {
+ "cannot be registered", shardName, getInstance(), subtree);
} else if (failure != null) {
LOG.error("Failed to find local shard {} - DataTreeChangeListener {} at path {} "
- + "cannot be registered: {}", shardName, getInstance(), subtree, failure);
+ + "cannot be registered", shardName, getInstance(), subtree, failure);
} else {
performRegistration(shard);
}
}, actorContext.getClientDispatcher());
}
- private synchronized void performRegistration(ActorRef shard) {
+ private synchronized void performRegistration(final ActorRef shard) {
if (isClosed()) {
return;
}
future.onComplete(new OnComplete<Object>() {
@Override
- public void onComplete(Throwable failure, Object val) {
+ public void onComplete(final Throwable failure, final Object val) {
if (failure != null) {
LOG.error("Unable to register {} as commit cohort", getInstance(), failure);
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.List;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.slf4j.Logger;
private final AbstractThreePhaseCommitCohort<?> delegate;
private final Throwable debugContext;
private final TransactionIdentifier transactionId;
+
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_FINAL")
private Logger log = LOG;
DebugThreePhaseCommitCohort(final TransactionIdentifier transactionId,
// XXX: do we need to account for cookies?
purgedHistories.add(historyId.getHistoryId());
- LOG.debug("{}: Purged history {}", historyId);
+ LOG.debug("{}: Purged history {}", shardName, historyId);
}
void onTransactionAborted(final TransactionIdentifier txId) {
return actorContext.executeOperationAsync(leader, message, actorContext.getTransactionCommitOperationTimeout());
}
- Future<ActorSelection> initiateCoordinatedCommit(Optional<SortedSet<String>> participatingShardNames) {
+ Future<ActorSelection> initiateCoordinatedCommit(final Optional<SortedSet<String>> participatingShardNames) {
final Future<Object> messageFuture = initiateCommit(false, participatingShardNames);
final Future<ActorSelection> ret = TransactionReadyReplyMapper.transform(messageFuture, actorContext,
transaction.getIdentifier());
LOG.debug("Transaction {} committed successfully", transaction.getIdentifier());
transactionCommitted(transaction);
} else {
- LOG.error("Transaction {} resulted in unhandled message type {}, aborting", message.getClass());
+ LOG.error("Transaction {} resulted in unhandled message type {}, aborting",
+ transaction.getIdentifier(), message.getClass());
transactionAborted(transaction);
}
}
throw new UnsupportedOperationException();
}
- protected void transactionAborted(SnapshotBackedWriteTransaction<TransactionIdentifier> aborted) {
+ protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> aborted) {
}
- protected void transactionCommitted(SnapshotBackedWriteTransaction<TransactionIdentifier> comitted) {
+ protected void transactionCommitted(final SnapshotBackedWriteTransaction<TransactionIdentifier> comitted) {
}
}
@Override
public Future<Object> directCommit(final Boolean havePermit) {
- LOG.debug("Tx {} directCommit called, failure: {}", getIdentifier(), failure);
+ LOG.debug("Tx {} directCommit called, failure", getIdentifier(), failure);
return akka.dispatch.Futures.failed(failure);
}
@Override
public Future<ActorSelection> readyTransaction(final Boolean havePermit,
final Optional<SortedSet<String>> participatingShardNamess) {
- LOG.debug("Tx {} readyTransaction called, failure: {}", getIdentifier(), failure);
+ LOG.debug("Tx {} readyTransaction called, failure", getIdentifier(), failure);
return akka.dispatch.Futures.failed(failure);
}
}
if (failure != null) {
- LOG.debug("Tx {} {} operation failed: {}", getIdentifier(), readCmd.getClass().getSimpleName(),
- failure);
+ LOG.debug("Tx {} {} operation failed", getIdentifier(), readCmd.getClass().getSimpleName(),
+ failure);
returnFuture.setException(new ReadFailedException("Error checking "
+ readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
@Override
public void onFailure(final Throwable failure) {
- log.debug("{}: An exception occurred during canCommit for {}: {}", name,
- cohortEntry.getTransactionId(), failure);
+ log.debug("{}: An exception occurred during canCommit for {}", name, cohortEntry.getTransactionId(),
+ failure);
cohortCache.remove(cohortEntry.getTransactionId());
cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
// between canCommit and ready and the entry was expired from the cache or it was aborted.
IllegalStateException ex = new IllegalStateException(
String.format("%s: Cannot canCommit transaction %s - no cohort entry found", name, transactionID));
- log.error(ex.getMessage());
+ log.error("{}: Inconsistency during transaction {} canCommit", name, transactionID, ex);
sender.tell(new Failure(ex), shard.self());
return;
}
// or it was aborted.
IllegalStateException ex = new IllegalStateException(
String.format("%s: Cannot commit transaction %s - no cohort entry found", name, transactionID));
- log.error(ex.getMessage());
+ log.error("{}: Inconsistency during transaction {} commit", name, transactionID, ex);
sender.tell(new Failure(ex), shard.self());
return;
}
// For debugging purposes, allow dumping of the modification. Coupled with the above
// precondition log, it should allow us to understand what went on.
- LOG.debug("{}: Store Tx {}: modifications: {} tree: {}", cohort.getIdentifier(), modification,
- dataTree);
+ LOG.debug("{}: Store Tx {}: modifications: {} tree: {}", logContext, cohort.getIdentifier(),
+ modification, dataTree);
cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
} catch (Exception e) {
LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
processNextPendingTransaction();
}
- private void insertEntry(Deque<CommitEntry> queue, CommitEntry entry, int atIndex) {
+ private void insertEntry(final Deque<CommitEntry> queue, final CommitEntry entry, final int atIndex) {
if (atIndex == 0) {
queue.addFirst(entry);
return;
}
private Collection<String> extractPrecedingShardNames(
- java.util.Optional<SortedSet<String>> participatingShardNames) {
+ final java.util.Optional<SortedSet<String>> participatingShardNames) {
return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
}
@SuppressWarnings("checkstyle:IllegalCatch")
public void applyRecoverySnapshot(final Snapshot.State snapshotState) {
if (!(snapshotState instanceof ShardSnapshotState)) {
- log.debug("{}: applyRecoverySnapshot ignoring snapshot: {}", snapshotState);
+ log.debug("{}: applyRecoverySnapshot ignoring snapshot: {}", shardName, snapshotState);
}
log.debug("{}: Applying recovered snapshot", shardName);
@SuppressWarnings("checkstyle:IllegalCatch")
public void applySnapshot(final Snapshot.State snapshotState) {
if (!(snapshotState instanceof ShardSnapshotState)) {
- log.debug("{}: applySnapshot ignoring snapshot: {}", snapshotState);
+ log.debug("{}: applySnapshot ignoring snapshot: {}", logId, snapshotState);
}
final ShardDataTreeSnapshot snapshot = ((ShardSnapshotState)snapshotState).getSnapshot();
@Override
public void onFailure(final Throwable failure) {
- LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId, operationName, failure);
+ LOG.debug("Tx {}: a {} cohort path Future failed", transactionId, operationName, failure);
if (propagateException) {
returnFuture.setException(failure);
@Override
public void finalizeReferent() {
- LOG.trace("Cleaning up {} Tx actors {}", cleanup);
+ LOG.trace("Cleaning up {} Tx actors", cleanup);
if (CACHE.remove(cleanup) != null) {
cleanup.closeTransaction();
} else {
LOG.debug("{}: Found entity {} but no other candidates - not clearing owner", persistenceId(),
- entityPath, newOwner);
+ entityPath);
}
});
LOG.warn("{}: Failed to delete prior snapshots", persistenceId(),
((DeleteSnapshotsFailure) message).cause());
} else if (message instanceof DeleteSnapshotsSuccess) {
- LOG.debug("{}: Successfully deleted prior snapshots", persistenceId(), message);
+ LOG.debug("{}: Successfully deleted prior snapshots", persistenceId());
} else if (message instanceof RegisterRoleChangeListenerReply) {
LOG.trace("{}: Received RegisterRoleChangeListenerReply", persistenceId());
} else if (message instanceof ClusterEvent.MemberEvent) {
public void onComplete(final Throwable failure, final Object response) {
if (failure != null) {
shardReplicaOperationsInProgress.remove(shardName);
- String msg = String.format("RemoveServer request to leader %s for shard %s failed",
- primaryPath, shardName);
- LOG.debug("{}: {}", persistenceId(), msg, failure);
+ LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
+ shardName, failure);
// FAILURE
- sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
+ sender.tell(new Status.Failure(new RuntimeException(
+ String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
+ failure)), self());
} else {
// SUCCESS
self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
public void onComplete(final Throwable failure, final Object response) {
if (failure != null) {
shardReplicaOperationsInProgress.remove(shardName);
- String msg = String.format("RemoveServer request to leader %s for shard %s failed",
- primaryPath, shardName);
-
- LOG.debug("{}: {}", persistenceId(), msg, failure);
+ LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
+ shardName, failure);
// FAILURE
- sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
+ sender.tell(new Status.Failure(new RuntimeException(
+ String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
+ failure)), self());
} else {
// SUCCESS
self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
try {
shardId = ShardIdentifier.fromShardIdString(actorName);
} catch (IllegalArgumentException e) {
- LOG.debug("{}: ignoring actor {}", actorName, e);
+ LOG.debug("{}: ignoring actor {}", persistenceId, actorName, e);
return;
}
private boolean isShardReplicaOperationInProgress(final String shardName, final ActorRef sender) {
if (shardReplicaOperationsInProgress.contains(shardName)) {
- String msg = String.format("A shard replica operation for %s is already in progress", shardName);
- LOG.debug("{}: {}", persistenceId(), msg);
- sender.tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
+ LOG.debug("{}: A shard replica operation for {} is already in progress", persistenceId(), shardName);
+ sender.tell(new Status.Failure(new IllegalStateException(
+ String.format("A shard replica operation for %s is already in progress", shardName))), getSelf());
return true;
}
// Create the localShard
if (schemaContext == null) {
- String msg = String.format(
- "No SchemaContext is available in order to create a local shard instance for %s", shardName);
- LOG.debug("{}: {}", persistenceId(), msg);
- getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
+ LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
+ persistenceId(), shardName);
+ getSender().tell(new Status.Failure(new IllegalStateException(
+ "No SchemaContext is available in order to create a local shard instance for " + shardName)),
+ getSelf());
return;
}
// verify the shard with the specified name is present in the cluster configuration
if (!this.configuration.isShardConfigured(shardName)) {
- String msg = String.format("No module configuration exists for shard %s", shardName);
- LOG.debug("{}: {}", persistenceId(), msg);
- getSender().tell(new Status.Failure(new IllegalArgumentException(msg)), getSelf());
+ LOG.debug("{}: No module configuration exists for shard {}", persistenceId(), shardName);
+ getSender().tell(new Status.Failure(new IllegalArgumentException(
+ "No module configuration exists for shard " + shardName)), getSelf());
return;
}
// Create the localShard
if (schemaContext == null) {
- String msg = String.format(
- "No SchemaContext is available in order to create a local shard instance for %s", shardName);
- LOG.debug("{}: {}", persistenceId(), msg);
- getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
+ LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
+ persistenceId(), shardName);
+ getSender().tell(new Status.Failure(new IllegalStateException(
+ "No SchemaContext is available in order to create a local shard instance for " + shardName)),
+ getSelf());
return;
}
}
private void sendLocalReplicaAlreadyExistsReply(final String shardName, final ActorRef sender) {
- String msg = String.format("Local shard %s already exists", shardName);
- LOG.debug("{}: {}", persistenceId(), msg);
- sender.tell(new Status.Failure(new AlreadyExistsException(msg)), getSelf());
+ LOG.debug("{}: Local shard {} already exists", persistenceId(), shardName);
+ sender.tell(new Status.Failure(new AlreadyExistsException(
+ String.format("Local shard %s already exists", shardName))), getSelf());
}
private void addPrefixShard(final String shardName, final YangInstanceIdentifier shardPrefix,
getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
sender);
} else if (response instanceof LocalShardNotFound) {
- String msg = String.format("Local shard %s does not exist", shardName);
- LOG.debug("{}: {}", persistenceId, msg);
- sender.tell(new Status.Failure(new IllegalArgumentException(msg)), self());
+ LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
+ sender.tell(new Status.Failure(new IllegalArgumentException(
+ String.format("Local shard %s does not exist", shardName))), self());
} else {
- String msg = String.format("Failed to find local shard %s: received response: %s",
- shardName, response);
- LOG.debug("{}: {}", persistenceId, msg);
- sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
- new RuntimeException(msg)), self());
+ LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
+ response);
+ sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
+ : new RuntimeException(
+ String.format("Failed to find local shard %s: received response: %s", shardName,
+ response))), self());
}
}
}
public void onComplete(final Throwable failure, final Object response) {
shardReplicaOperationsInProgress.remove(shardName);
if (failure != null) {
- String msg = String.format("ChangeServersVotingStatus request to local shard %s failed",
- shardActorRef.path());
- LOG.debug("{}: {}", persistenceId(), msg, failure);
- sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
+ LOG.debug("{}: ChangeServersVotingStatus request to local shard {} failed", persistenceId(),
+ shardActorRef.path(), failure);
+ sender.tell(new Status.Failure(new RuntimeException(
+ String.format("ChangeServersVotingStatus request to local shard %s failed",
+ shardActorRef.path()), failure)), self());
} else {
LOG.debug("{}: Received {} from local shard {}", persistenceId(), response, shardActorRef.path());
@Override
public void onUnknownResponse(final Object response) {
- String msg = String.format("Failed to find leader for shard %s: received response: %s",
- shardName, response);
- LOG.debug("{}: {}", persistenceId, msg);
- targetActor.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
- new RuntimeException(msg)), shardManagerActor);
+ LOG.debug("{}: Failed to find leader for shard {}: received response: {}", persistenceId, shardName,
+ response);
+ targetActor.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
+ : new RuntimeException(String.format("Failed to find leader for shard %s: received response: %s",
+ shardName, response))), shardManagerActor);
}
}
params.replyToActor.tell(message, getSelf());
getSelf().tell(PoisonPill.getInstance(), getSelf());
} else if (message instanceof ReceiveTimeout) {
- String msg = String.format(
- "Timed out after %s ms while waiting for snapshot replies from %d shard(s). %d shard(s) %s "
- + "did not respond.", params.receiveTimeout.toMillis(), params.shardNames.size(),
- remainingShardNames.size(), remainingShardNames);
- LOG.warn("{}: {}", params.id, msg);
- params.replyToActor.tell(new Failure(new TimeoutException(msg)), getSelf());
+ LOG.warn("{}: Timed out after {} ms while waiting for snapshot replies from {} shard(s). "
+ + "{} shard(s) {} did not respond", params.id, params.receiveTimeout.toMillis(),
+ params.shardNames.size(), remainingShardNames.size(), remainingShardNames);
+ params.replyToActor.tell(new Failure(new TimeoutException(String.format(
+ "Timed out after %s ms while waiting for snapshot replies from %d shard(s). %d shard(s) %s "
+ + "did not respond.", params.receiveTimeout.toMillis(), params.shardNames.size(),
+ remainingShardNames.size(), remainingShardNames))), getSelf());
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
}
private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
new Mapper<Throwable, Throwable>() {
@Override
- public Throwable apply(Throwable failure) {
+ public Throwable apply(final Throwable failure) {
Throwable actualFailure = failure;
if (failure instanceof AskTimeoutException) {
// A timeout exception most likely means the shard isn't initialized.
private final PrimaryShardInfoFutureCache primaryShardInfoCache;
private final ShardStrategyFactory shardStrategyFactory;
- public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
- ClusterWrapper clusterWrapper, Configuration configuration) {
+ public ActorContext(final ActorSystem actorSystem, final ActorRef shardManager,
+ final ClusterWrapper clusterWrapper, final Configuration configuration) {
this(actorSystem, shardManager, clusterWrapper, configuration,
DatastoreContext.newBuilder().build(), new PrimaryShardInfoFutureCache());
}
- public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
- ClusterWrapper clusterWrapper, Configuration configuration,
- DatastoreContext datastoreContext, PrimaryShardInfoFutureCache primaryShardInfoCache) {
+ public ActorContext(final ActorSystem actorSystem, final ActorRef shardManager,
+ final ClusterWrapper clusterWrapper, final Configuration configuration,
+ final DatastoreContext datastoreContext, final PrimaryShardInfoFutureCache primaryShardInfoCache) {
this.actorSystem = actorSystem;
this.shardManager = shardManager;
this.clusterWrapper = clusterWrapper;
return shardManager;
}
- public ActorSelection actorSelection(String actorPath) {
+ public ActorSelection actorSelection(final String actorPath) {
return actorSystem.actorSelection(actorPath);
}
- public ActorSelection actorSelection(ActorPath actorPath) {
+ public ActorSelection actorSelection(final ActorPath actorPath) {
return actorSystem.actorSelection(actorPath);
}
- public void setSchemaContext(SchemaContext schemaContext) {
+ public void setSchemaContext(final SchemaContext schemaContext) {
this.schemaContext = schemaContext;
if (shardManager != null) {
}
}
- public void setDatastoreContext(DatastoreContextFactory contextFactory) {
+ public void setDatastoreContext(final DatastoreContextFactory contextFactory) {
this.datastoreContext = contextFactory.getBaseDatastoreContext();
setCachedProperties();
return future.transform(new Mapper<Object, PrimaryShardInfo>() {
@Override
- public PrimaryShardInfo checkedApply(Object response) throws UnknownMessageException {
+ public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
if (response instanceof RemotePrimaryShardFound) {
LOG.debug("findPrimaryShardAsync received: {}", response);
RemotePrimaryShardFound found = (RemotePrimaryShardFound)response;
}, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
}
- private PrimaryShardInfo onPrimaryShardFound(String shardName, String primaryActorPath,
- short primaryVersion, DataTree localShardDataTree) {
+ private PrimaryShardInfo onPrimaryShardFound(final String shardName, final String primaryActorPath,
+ final short primaryVersion, final DataTree localShardDataTree) {
ActorSelection actorSelection = actorSystem.actorSelection(primaryActorPath);
PrimaryShardInfo info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
new PrimaryShardInfo(actorSelection, primaryVersion, localShardDataTree);
* @return a reference to a local shard actor which represents the shard
* specified by the shardName
*/
- public Optional<ActorRef> findLocalShard(String shardName) {
+ public Optional<ActorRef> findLocalShard(final String shardName) {
Object result = executeOperation(shardManager, new FindLocalShard(shardName, false));
if (result instanceof LocalShardFound) {
return future.map(new Mapper<Object, ActorRef>() {
@Override
- public ActorRef checkedApply(Object response) throws Throwable {
+ public ActorRef checkedApply(final Object response) throws Throwable {
if (response instanceof LocalShardFound) {
LocalShardFound found = (LocalShardFound)response;
LOG.debug("Local shard found {}", found.getPath());
* @return The response of the operation
*/
@SuppressWarnings("checkstyle:IllegalCatch")
- public Object executeOperation(ActorRef actor, Object message) {
+ public Object executeOperation(final ActorRef actor, final Object message) {
Future<Object> future = executeOperationAsync(actor, message, operationTimeout);
try {
* @return the response message
*/
@SuppressWarnings("checkstyle:IllegalCatch")
- public Object executeOperation(ActorSelection actor, Object message) {
+ public Object executeOperation(final ActorSelection actor, final Object message) {
Future<Object> future = executeOperationAsync(actor, message);
try {
}
}
- public Future<Object> executeOperationAsync(ActorRef actor, Object message, Timeout timeout) {
+ public Future<Object> executeOperationAsync(final ActorRef actor, final Object message, final Timeout timeout) {
Preconditions.checkArgument(actor != null, "actor must not be null");
Preconditions.checkArgument(message != null, "message must not be null");
* @param timeout the operation timeout
* @return a Future containing the eventual result
*/
- public Future<Object> executeOperationAsync(ActorSelection actor, Object message,
- Timeout timeout) {
+ public Future<Object> executeOperationAsync(final ActorSelection actor, final Object message,
+ final Timeout timeout) {
Preconditions.checkArgument(actor != null, "actor must not be null");
Preconditions.checkArgument(message != null, "message must not be null");
* @param message the message to send
* @return a Future containing the eventual result
*/
- public Future<Object> executeOperationAsync(ActorSelection actor, Object message) {
+ public Future<Object> executeOperationAsync(final ActorSelection actor, final Object message) {
return executeOperationAsync(actor, message, operationTimeout);
}
* @param actor the ActorSelection
* @param message the message to send
*/
- public void sendOperationAsync(ActorSelection actor, Object message) {
+ public void sendOperationAsync(final ActorSelection actor, final Object message) {
Preconditions.checkArgument(actor != null, "actor must not be null");
Preconditions.checkArgument(message != null, "message must not be null");
/**
* Send the message to each and every shard.
*/
- public void broadcast(final Function<Short, Object> messageSupplier, Class<?> messageClass) {
+ public void broadcast(final Function<Short, Object> messageSupplier, final Class<?> messageClass) {
for (final String shardName : configuration.getAllShardNames()) {
Future<PrimaryShardInfo> primaryFuture = findPrimaryShardAsync(shardName);
primaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
@Override
- public void onComplete(Throwable failure, PrimaryShardInfo primaryShardInfo) {
+ public void onComplete(final Throwable failure, final PrimaryShardInfo primaryShardInfo) {
if (failure != null) {
- LOG.warn("broadcast failed to send message {} to shard {}: {}",
- messageClass.getSimpleName(), shardName, failure);
+ LOG.warn("broadcast failed to send message {} to shard {}", messageClass.getSimpleName(),
+ shardName, failure);
} else {
Object message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
primaryShardInfo.getPrimaryShardActor().tell(message, ActorRef.noSender());
return operationTimeout;
}
- public boolean isPathLocal(String path) {
+ public boolean isPathLocal(final String path) {
if (Strings.isNullOrEmpty(path)) {
return false;
}
* @param operationName the name of the operation
* @return the Timer instance
*/
- public Timer getOperationTimer(String operationName) {
+ public Timer getOperationTimer(final String operationName) {
return getOperationTimer(datastoreContext.getDataStoreName(), operationName);
}
- public Timer getOperationTimer(String dataStoreType, String operationName) {
+ public Timer getOperationTimer(final String dataStoreType, final String operationName) {
final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
operationName, METRIC_RATE);
return metricRegistry.timer(rate);
return shardStrategyFactory;
}
- protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout) {
+ protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
return ask(actorRef, message, timeout);
}
- protected Future<Object> doAsk(ActorSelection actorRef, Object message, Timeout timeout) {
+ protected Future<Object> doAsk(final ActorSelection actorRef, final Object message, final Timeout timeout) {
return ask(actorRef, message, timeout);
}
commitFuture = executor.submit(new CommitCoordinationTask<>(transaction, cohorts, commitStatsTracker,
futureValueSupplier));
} catch (RejectedExecutionException e) {
- LOG.error("The commit executor's queue is full - submit task was rejected. \n" + executor, e);
+ LOG.error("The commit executor {} queue is full - submit task was rejected", executor, e);
commitFuture = Futures.immediateFailedFuture(new TransactionCommitFailedException(
"Could not submit the commit task - the commit queue capacity has been exceeded.", e));
}
public void onFailure(final Throwable failure) {
LOG.debug("Failed to execute RPC {}", msg.getRpc(), failure);
LOG.error("Failed to execute RPC {} due to {}. More details are available on DEBUG level.",
- msg.getRpc(), Throwables.getRootCause(failure));
+ msg.getRpc(), Throwables.getRootCause(failure).getMessage());
sender.tell(new akka.actor.Status.Failure(failure), self);
}
}, MoreExecutors.directExecutor());
versions.remove(addr);
final Bucket<T> bucket = remoteBuckets.remove(addr);
if (bucket != null) {
- LOG.debug("Source actor dead, removing bucket {} from ", bucket, addr);
+ LOG.debug("Source actor dead, removing bucket {} from {}", bucket, addr);
onBucketRemoved(addr, bucket);
}
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.remote.rpc.registry.mbeans;
import akka.actor.Address;
import akka.util.Timeout;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import scala.concurrent.Await;
import scala.concurrent.Future;
-
public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements RemoteRpcRegistryMXBean {
+ @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
protected final Logger log = LoggerFactory.getLogger(getClass());
private static final String LOCAL_CONSTANT = "local";
private final BucketStoreAccess rpcRegistryAccess;
private final Timeout timeout;
- public RemoteRpcRegistryMXBeanImpl(final BucketStoreAccess rpcRegistryAccess, Timeout timeout) {
+ public RemoteRpcRegistryMXBeanImpl(final BucketStoreAccess rpcRegistryAccess, final Timeout timeout) {
super("RemoteRpcRegistry", "RemoteRpcBroker", null);
this.rpcRegistryAccess = rpcRegistryAccess;
this.timeout = timeout;
@Override
public void onFailure(final Throwable ex) {
- LOG.error(String.format("Failed to add car-person entry: [%s]", carPerson), ex);
+ LOG.error("Failed to add car-person entry: [{}]", carPerson, ex);
}
}, MoreExecutors.directExecutor());
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
*
* @author Thomas Pantelis
*/
+@SuppressFBWarnings("SLF4J_ILLEGAL_PASSED_CLASS")
public class CarProvider implements CarService {
private static final Logger LOG_PURCHASE_CAR = LoggerFactory.getLogger(PurchaseCarProvider.class);
@Override
public void onFailure(final Throwable ex) {
- LOG.error(String.format("RPC addPerson : person addition failed [%s]", person), ex);
+ LOG.error("RPC addPerson : person addition failed [{}]", person, ex);
futureResult.set(RpcResultBuilder.<AddPersonOutput>failed()
.withError(RpcError.ErrorType.APPLICATION, ex.getMessage()).build());
}
changes.forEach(change -> {
if (change.getRootNode().getDataAfter().isPresent()) {
- LOG.trace("Received change, data before: {}, data after: ",
+ LOG.trace("Received change, data before: {}, data after: {}",
change.getRootNode().getDataBefore().isPresent()
? change.getRootNode().getDataBefore().get() : "",
change.getRootNode().getDataAfter().get());
if (result.isSuccessful()) {
LOG.info("makeBreakfast succeeded");
} else {
- LOG.warn("makeBreakfast failed: " + result.getErrors());
+ LOG.warn("makeBreakfast failed: {}", result.getErrors());
}
return result.isSuccessful();
-
} catch (InterruptedException | ExecutionException e) {
- LOG.warn("An error occurred while maing breakfast: " + e);
+ LOG.warn("An error occurred while making breakfast", e);
}
return Boolean.FALSE;
*/
@Override
public void onToasterRestocked(final ToasterRestocked notification) {
- LOG.info("ToasterRestocked notification - amountOfBread: " + notification.getAmountOfBread());
+ LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
toasterOutOfBread = false;
}
}
*/
@Override
public ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final MakeToastInput input) {
- LOG.info("makeToast: " + input);
+ LOG.info("makeToast: {}", input);
final SettableFuture<RpcResult<MakeToastOutput>> futureResult = SettableFuture.create();
*/
@Override
public ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
- LOG.info("restockToaster: " + input);
+ LOG.info("restockToaster: {}", input);
amountOfBreadInStock.set(input.getAmountOfBreadToStock());