Fix findbugs warnings 51/75351/4
author Robert Varga <robert.varga@pantheon.tech>
Mon, 20 Aug 2018 23:07:54 +0000 (01:07 +0200)
committer Stephen Kitt <skitt@redhat.com>
Wed, 22 Aug 2018 07:44:10 +0000 (07:44 +0000)
The upgraded findbugs finds these; fix them up.

Change-Id: Id5a008cddc6616c3a93f0528efca00b86843fc3c
Signed-off-by: Robert Varga <robert.varga@pantheon.tech>
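
Most of the hunks below apply the same findbugs-driven patterns: SLF4J calls are rewritten so that every '{}' placeholder has a matching argument and any Throwable is passed as the trailing argument (so SLF4J prints the full stack trace instead of only its toString()), and loggers that intentionally remain non-private are annotated with @SuppressFBWarnings. A minimal sketch of the logging pattern, using a hypothetical class and message not taken from the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class Slf4jFindbugsExample {
    private static final Logger LOG = LoggerFactory.getLogger(Slf4jFindbugsExample.class);

    private Slf4jFindbugsExample() {
    }

    static void report(final String shardName, final Throwable failure) {
        // Before: the Throwable is consumed by the second '{}', so only its
        // toString() is rendered and the stack trace is lost.
        LOG.error("Failed to find local shard {}: {}", shardName, failure);

        // After: placeholders and arguments match, and the Throwable is the
        // extra trailing argument, so SLF4J logs the full stack trace.
        LOG.error("Failed to find local shard {}", shardName, failure);
    }
}
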
53 files changed:
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractDependentComponentFactoryMetadata.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/OpendaylightNamespaceHandler.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticServiceReferenceRecipe.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ReconnectForwarder.java
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingBroker.java
opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopology.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SyncStatusTracker.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceAdapter.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceWithInterestListeners.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointListenerAdapter.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointServiceAdapter.java
opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/impl/ActorSystemProviderImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SingleClientHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeNotificationPublisherActorProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendClientMetadataBuilder.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardSnapshotCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerGetSnapshotReplyActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/SerializedDOMDataBroker.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcInvoker.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreActor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteRpcRegistryMXBeanImpl.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/CarProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsListener.java
opendaylight/md-sal/samples/toaster-consumer/src/main/java/org/opendaylight/controller/sample/kitchen/impl/KitchenServiceImpl.java
opendaylight/md-sal/samples/toaster-provider/src/main/java/org/opendaylight/controller/sample/toaster/provider/OpendaylightToaster.java

index e823523e8e75aea7cdcc69da90fa634e995296d0..37343b9362533b55a100ff10127efd621b9416c7 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.blueprint.ext;
 
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -32,6 +33,7 @@ import org.slf4j.LoggerFactory;
  * @author Thomas Pantelis
  */
 abstract class AbstractDependentComponentFactoryMetadata implements DependentComponentFactoryMetadata {
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     final Logger log = LoggerFactory.getLogger(getClass());
     private final String id;
     private final AtomicBoolean started = new AtomicBoolean();
index 01f922cc61a11d4bd47a954def6b2622cb7410d7..ff22f1bc30c64bb5af4375b0e79d50291275f962 100644 (file)
@@ -208,7 +208,7 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
                     + " can only be used on the root <blueprint> element");
         }
 
-        LOG.debug("{}: {}", propertyName, attr.getValue());
+        LOG.debug("Property {} = {}", propertyName, attr.getValue());
 
         if (!Boolean.parseBoolean(attr.getValue())) {
             return component;
index a3c6896fbf53197232b9ceab97636a83439c87e6..fdeea3bb091bdfed61471066ab4f02f28d506176 100644 (file)
@@ -58,7 +58,7 @@ class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe {
         LOG.debug("{}: In untrack {}", getName(), reference);
 
         if (trackedServiceReference == reference) {
-            LOG.debug("{}: Current reference has been untracked", getName(), trackedServiceReference);
+            LOG.debug("{}: Current reference {} has been untracked", getName(), trackedServiceReference);
         }
     }
 
index c9be5be548446203de27cfed75df434a601f485e..af66369271c34c8eba953d6fc6ecf5c27637aa51 100644 (file)
@@ -262,7 +262,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
             TimeUnit.NANOSECONDS.sleep(delay);
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
-            LOG.debug("Interrupted after sleeping {}ns", e, currentTime() - now);
+            LOG.debug("Interrupted after sleeping {}ns", currentTime() - now, e);
         }
     }
 
index 3f6515cbb8b20ce22183b857eccacd6acb50223c..fa2e3b76d8a038497d57efd7e344498862717d06 100644 (file)
@@ -418,7 +418,7 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
             try {
                 return cookie.equals(extractCookie(id));
             } catch (IllegalArgumentException e) {
-                LOG.debug("extractCookie failed while cancelling slicing for cookie {}: {}", cookie, e);
+                LOG.debug("extractCookie failed while cancelling slicing for cookie {}", cookie, e);
                 return false;
             }
         });
index 58c9e7549e493c5587cdaceb299de5b4b603d565..58334f91d79199dd713f7bf8f139800f999c4260 100644 (file)
@@ -8,8 +8,6 @@
 package org.opendaylight.controller.cluster.access.client;
 
 import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Forwarder class responsible for routing requests from the previous connection incarnation back to the originator,
@@ -18,7 +16,6 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public abstract class ReconnectForwarder {
-    static final Logger LOG = LoggerFactory.getLogger(ReconnectForwarder.class);
     // Visible for subclass method handle
     private final AbstractReceivingClientConnection<?> successor;
 
index cdb4c716931c277fe417f86e187a13dcb94fd68b..7103a007680b1fe42db06eee13de17cbe64c976a 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.md.sal.trace.dom.impl;
 
 import static java.util.Objects.requireNonNull;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -100,7 +101,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class TracingBroker implements TracingDOMDataBroker {
-
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     static final Logger LOG = LoggerFactory.getLogger(TracingBroker.class);
 
     private static final int STACK_TRACE_FIRST_RELEVANT_FRAME = 2;
index 934056dcab81f2eeb81f387a6d6986c31ec5b629..50b8e4f45962532af5a1c792fb6be8da99f09ad0 100644 (file)
@@ -110,7 +110,7 @@ public class EventSourceTopology implements EventAggregatorService, EventSourceR
 
             @Override
             public void onFailure(final Throwable ex) {
-                LOG.error("Can not put data into datastore [store: {}] [path: {}] [exception: {}]",store,path, ex);
+                LOG.error("Can not put data into datastore [store: {}] [path: {}]", store, path, ex);
             }
         }, MoreExecutors.directExecutor());
     }
@@ -127,7 +127,7 @@ public class EventSourceTopology implements EventAggregatorService, EventSourceR
 
             @Override
             public void onFailure(final Throwable ex) {
-                LOG.error("Can not delete data from datastore [store: {}] [path: {}] [exception: {}]",store,path, ex);
+                LOG.error("Can not delete data from datastore [store: {}] [path: {}]", store, path, ex);
             }
         }, MoreExecutors.directExecutor());
     }
index f9099a7b818a7e8b5004157a4b73da0f90e57ac9..6cbeda6098d2026225767d1564aae76109a1fa00 100755 (executable)
@@ -277,7 +277,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             // non-leader cannot satisfy leadership request
             LOG.warn("{}: onRequestLeadership {} was sent to non-leader."
                     + " Current behavior: {}. Sending failure response",
-                    persistenceId(), getCurrentBehavior().state());
+                    persistenceId(), message, getCurrentBehavior().state());
             message.getReplyTo().tell(new LeadershipTransferFailedException("Cannot transfer leader to "
                     + message.getRequestedFollowerId()
                     + ". RequestLeadership message was sent to non-leader " + persistenceId()), getSelf());
index 19d796c9363aa928e7f287e218807093fa93f2a3..9c8fbe496cd96462b38fa798b0d3001943b5f4c5 100644 (file)
@@ -159,7 +159,7 @@ public class RaftActorContextImpl implements RaftActorContext {
                 cluster = Optional.of(Cluster.get(getActorSystem()));
             } catch (Exception e) {
                 // An exception means there's no cluster configured. This will only happen in unit tests.
-                log.debug("{}: Could not obtain Cluster: {}", getId(), e);
+                log.debug("{}: Could not obtain Cluster", getId(), e);
                 cluster = Optional.empty();
             }
         }
index 1b9343c1f3ff569b378c379f3b0ef1c91c7773d3..f071d945662a73d22803fc9ea277a88d88ef29c1 100644 (file)
@@ -46,7 +46,7 @@ class RaftActorRecoverySupport {
         this.log = context.getLogger();
     }
 
-    boolean handleRecoveryMessage(Object message, PersistentDataProvider persistentProvider) {
+    boolean handleRecoveryMessage(final Object message, final PersistentDataProvider persistentProvider) {
         log.trace("{}: handleRecoveryMessage: {}", context.getId(), message);
 
         anyDataRecovered = anyDataRecovered || !(message instanceof RecoveryCompleted);
@@ -105,7 +105,7 @@ class RaftActorRecoverySupport {
         }
     }
 
-    private void onRecoveredSnapshot(SnapshotOffer offer) {
+    private void onRecoveredSnapshot(final SnapshotOffer offer) {
         log.debug("{}: SnapshotOffer called.", context.getId());
 
         initRecoveryTimer();
@@ -153,7 +153,7 @@ class RaftActorRecoverySupport {
                 replicatedLog().getSnapshotTerm(), replicatedLog().size());
     }
 
-    private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+    private void onRecoveredJournalLogEntry(final ReplicatedLogEntry logEntry) {
         if (log.isDebugEnabled()) {
             log.debug("{}: Received ReplicatedLogEntry for recovery: index: {}, size: {}", context.getId(),
                     logEntry.getIndex(), logEntry.size());
@@ -174,7 +174,7 @@ class RaftActorRecoverySupport {
         }
     }
 
-    private void onRecoveredApplyLogEntries(long toIndex) {
+    private void onRecoveredApplyLogEntries(final long toIndex) {
         if (!context.getPersistenceProvider().isRecoveryApplicable()) {
             dataRecoveredWithPersistenceDisabled = true;
             return;
@@ -206,7 +206,7 @@ class RaftActorRecoverySupport {
         context.setCommitIndex(lastApplied);
     }
 
-    private void onDeleteEntries(DeleteEntries deleteEntries) {
+    private void onDeleteEntries(final DeleteEntries deleteEntries) {
         if (context.getPersistenceProvider().isRecoveryApplicable()) {
             replicatedLog().removeFrom(deleteEntries.getFromIndex());
         } else {
@@ -214,7 +214,7 @@ class RaftActorRecoverySupport {
         }
     }
 
-    private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+    private void batchRecoveredLogEntry(final ReplicatedLogEntry logEntry) {
         initRecoveryTimer();
 
         int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
@@ -236,7 +236,7 @@ class RaftActorRecoverySupport {
         currentRecoveryBatchCount = 0;
     }
 
-    private void onRecoveryCompletedMessage(PersistentDataProvider persistentProvider) {
+    private void onRecoveryCompletedMessage(final PersistentDataProvider persistentProvider) {
         if (currentRecoveryBatchCount > 0) {
             endCurrentLogRecoveryBatch();
         }
@@ -248,9 +248,9 @@ class RaftActorRecoverySupport {
             recoveryTimer = null;
         }
 
-        log.info("Recovery completed" + recoveryTime + " - Switching actor to Follower - " + "Persistence Id =  "
-                  + context.getId() + " Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, "
-                  + "journal-size = {}", replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
+        log.info("Recovery completed {} - Switching actor to Follower - Persistence Id = {}"
+                 + " Last index in log = {}, snapshotIndex = {}, snapshotTerm = {}, journal-size = {}",
+                 recoveryTime, context.getId(), replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
                  replicatedLog().getSnapshotTerm(), replicatedLog().size());
 
         if (dataRecoveredWithPersistenceDisabled
@@ -284,19 +284,19 @@ class RaftActorRecoverySupport {
         }
     }
 
-    private static boolean isServerConfigurationPayload(ReplicatedLogEntry repLogEntry) {
+    private static boolean isServerConfigurationPayload(final ReplicatedLogEntry repLogEntry) {
         return repLogEntry.getData() instanceof ServerConfigurationPayload;
     }
 
-    private static boolean isPersistentPayload(ReplicatedLogEntry repLogEntry) {
+    private static boolean isPersistentPayload(final ReplicatedLogEntry repLogEntry) {
         return repLogEntry.getData() instanceof PersistentPayload;
     }
 
-    private static boolean isMigratedPayload(ReplicatedLogEntry repLogEntry) {
+    private static boolean isMigratedPayload(final ReplicatedLogEntry repLogEntry) {
         return isMigratedSerializable(repLogEntry.getData());
     }
 
-    private static boolean isMigratedSerializable(Object message) {
+    private static boolean isMigratedSerializable(final Object message) {
         return message instanceof MigratedSerializable && ((MigratedSerializable)message).isMigrated();
     }
 }
index e8c1b09772f172157c177c747b1bc2f7c5b51430..400f110865f53a37e64d223f7cbe894c912262f6 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 import akka.actor.ActorRef;
 import akka.actor.Cancellable;
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
@@ -39,6 +40,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     /**
      * Used for message logging.
      */
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     protected final Logger log;
 
     /**
@@ -96,7 +98,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     }
 
     @Override
-    public void setReplicatedToAllIndex(long replicatedToAllIndex) {
+    public void setReplicatedToAllIndex(final long replicatedToAllIndex) {
         this.replicatedToAllIndex = replicatedToAllIndex;
     }
 
@@ -127,7 +129,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      * @param appendEntries the message
      * @return a new behavior if it was changed or the current behavior
      */
-    protected RaftActorBehavior appendEntries(ActorRef sender, AppendEntries appendEntries) {
+    protected RaftActorBehavior appendEntries(final ActorRef sender, final AppendEntries appendEntries) {
 
         // 1. Reply false if term < currentTerm (§5.1)
         if (appendEntries.getTerm() < currentTerm()) {
@@ -165,7 +167,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      * @param requestVote the message
      * @return a new behavior if it was changed or the current behavior
      */
-    protected RaftActorBehavior requestVote(ActorRef sender, RequestVote requestVote) {
+    protected RaftActorBehavior requestVote(final ActorRef sender, final RequestVote requestVote) {
 
         log.debug("{}: In requestVote:  {} - currentTerm: {}, votedFor: {}, lastIndex: {}, lastTerm: {}", logName(),
                 requestVote, currentTerm(), votedFor(), lastIndex(), lastTerm());
@@ -185,7 +187,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         return this;
     }
 
-    protected boolean canGrantVote(RequestVote requestVote) {
+    protected boolean canGrantVote(final RequestVote requestVote) {
         boolean grantVote = false;
 
         //  Reply false if term < currentTerm (§5.1)
@@ -264,7 +266,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @param interval the duration after which we should trigger a new election
      */
-    protected void scheduleElection(FiniteDuration interval) {
+    protected void scheduleElection(final FiniteDuration interval) {
         stopElection();
 
         // Schedule an election. When the scheduler triggers an ElectionTimeout message is sent to itself
@@ -322,7 +324,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      * @param logIndex the log index
      * @return the ClientRequestTracker or null if none available
      */
-    protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+    protected ClientRequestTracker removeClientRequestTracker(final long logIndex) {
         return null;
     }
 
@@ -331,7 +333,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @return the log entry index or -1 if not found
      */
-    protected long getLogEntryIndex(long index) {
+    protected long getLogEntryIndex(final long index) {
         if (index == context.getReplicatedLog().getSnapshotIndex()) {
             return context.getReplicatedLog().getSnapshotIndex();
         }
@@ -349,7 +351,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @return the log entry term or -1 if not found
      */
-    protected long getLogEntryTerm(long index) {
+    protected long getLogEntryTerm(final long index) {
         if (index == context.getReplicatedLog().getSnapshotIndex()) {
             return context.getReplicatedLog().getSnapshotTerm();
         }
@@ -405,7 +407,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     }
 
     @Override
-    public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
+    public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
         if (message instanceof AppendEntries) {
             return appendEntries(sender, (AppendEntries) message);
         } else if (message instanceof AppendEntriesReply) {
@@ -420,16 +422,16 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     }
 
     @Override
-    public RaftActorBehavior switchBehavior(RaftActorBehavior behavior) {
+    public RaftActorBehavior switchBehavior(final RaftActorBehavior behavior) {
         return internalSwitchBehavior(behavior);
     }
 
-    protected RaftActorBehavior internalSwitchBehavior(RaftState newState) {
+    protected RaftActorBehavior internalSwitchBehavior(final RaftState newState) {
         return internalSwitchBehavior(createBehavior(context, newState));
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    protected RaftActorBehavior internalSwitchBehavior(RaftActorBehavior newBehavior) {
+    protected RaftActorBehavior internalSwitchBehavior(final RaftActorBehavior newBehavior) {
         if (!context.getRaftPolicy().automaticElectionsEnabled()) {
             return this;
         }
@@ -445,7 +447,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     }
 
 
-    protected int getMajorityVoteCount(int numPeers) {
+    protected int getMajorityVoteCount(final int numPeers) {
         // Votes are required from a majority of the peers including self.
         // The numMajority field therefore stores a calculated value
         // of the number of votes required for this candidate to win an
index e2512d5b67f911acfab9e8d94e3c5f4888381714..20714493e84658d7a8c780cc3a1efd0573962b13 100644 (file)
@@ -68,7 +68,7 @@ public class SyncStatusTracker {
             LOG.debug("{}: Lagging {} entries behind leader {}", id, lag, leaderId);
             changeSyncStatus(NOT_IN_SYNC, false);
         } else if (commitIndex >= syncTarget.minimumCommitIndex) {
-            LOG.debug("{}: Lagging {} entries behind leader and reached {} (of expected {})", id, lag, leaderId,
+            LOG.debug("{}: Lagging {} entries behind leader {} and reached {} (of expected {})", id, lag, leaderId,
                 commitIndex, syncTarget.minimumCommitIndex);
             changeSyncStatus(IN_SYNC, false);
         }
index 149c2c2dab04f72241326d3472ac43876f31fc7e..ac85f65b7781d50153afb18aa4ee157e27583788 100644 (file)
@@ -22,8 +22,8 @@ public class HeliumNotificationProviderServiceAdapter extends HeliumNotification
 
     private final NotificationPublishService notificationPublishService;
 
-    public HeliumNotificationProviderServiceAdapter(NotificationPublishService notificationPublishService,
-                                                 NotificationService notificationService) {
+    public HeliumNotificationProviderServiceAdapter(final NotificationPublishService notificationPublishService,
+            final NotificationService notificationService) {
         super(notificationService);
         this.notificationPublishService = notificationPublishService;
     }
@@ -33,7 +33,7 @@ public class HeliumNotificationProviderServiceAdapter extends HeliumNotification
         try {
             notificationPublishService.putNotification(notification);
         } catch (InterruptedException e) {
-            LOG.error("Notification publication was interupted: "  + e);
+            LOG.error("Notification publication was interupted", e);
         }
     }
 
@@ -42,17 +42,18 @@ public class HeliumNotificationProviderServiceAdapter extends HeliumNotification
         try {
             notificationPublishService.putNotification(notification);
         } catch (InterruptedException e) {
-            LOG.error("Notification publication was interupted: "  + e);
+            LOG.error("Notification publication was interupted", e);
         }
     }
 
     @Override
     public ListenerRegistration<NotificationInterestListener> registerInterestListener(
-            NotificationInterestListener interestListener) {
+            final NotificationInterestListener interestListener) {
         throw new UnsupportedOperationException("InterestListener is not supported.");
     }
 
     @Override
-    public void close() throws Exception {
+    public void close() {
+
     }
 }
index ee64e470806f6f7d672cb0f9173efeae24a64d96..2a07b03b8300e330a8eda1cf2fb9c88fc2b26084 100644 (file)
@@ -68,7 +68,7 @@ public class HeliumNotificationProviderServiceWithInterestListeners extends Heli
                 try {
                     notifyListener(listenerRef, baEvent);
                 } catch (RuntimeException  e) {
-                    LOG.warn("Unhandled exception during invoking listener {}", e, listenerRef);
+                    LOG.warn("Unhandled exception during invoking listener {}", listenerRef, e);
                 }
             }
         }
@@ -90,7 +90,7 @@ public class HeliumNotificationProviderServiceWithInterestListeners extends Heli
         };
     }
 
-    private void notifyListener(final NotificationInterestListener listener,
+    private static void notifyListener(final NotificationInterestListener listener,
             final Set<Class<? extends Notification>> baEvent) {
         for (final Class<? extends Notification> event: baEvent) {
             listener.onNotificationSubscribtion(event);
@@ -114,7 +114,7 @@ public class HeliumNotificationProviderServiceWithInterestListeners extends Heli
     }
 
     @Override
-    public void close() throws Exception {
+    public void close() {
         super.close();
         domListener.close();
     }
index a929af5bcf18951a57c59a5bffd8f7bfd5e2e23e..69e6d99a29fa68fb7fa3e2182f549eb6e7b47f3f 100644 (file)
@@ -16,9 +16,12 @@ import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 final class BindingDOMMountPointListenerAdapter<T extends MountPointListener>
         implements ListenerRegistration<T>, DOMMountPointListener {
+    private static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointListenerAdapter.class);
 
     private final T listener;
     private final ListenerRegistration<DOMMountPointListener> registration;
@@ -47,8 +50,7 @@ final class BindingDOMMountPointListenerAdapter<T extends MountPointListener>
             final InstanceIdentifier<? extends DataObject> bindingPath = toBinding(path);
             listener.onMountPointCreated(bindingPath);
         } catch (final DeserializationException e) {
-            BindingDOMMountPointServiceAdapter.LOG.error("Unable to translate mountPoint path {}. Omitting event.",
-                    path, e);
+            LOG.error("Unable to translate mountPoint path {}. Omitting event.", path, e);
         }
 
     }
@@ -69,8 +71,7 @@ final class BindingDOMMountPointListenerAdapter<T extends MountPointListener>
             final InstanceIdentifier<? extends DataObject> bindingPath = toBinding(path);
             listener.onMountPointRemoved(bindingPath);
         } catch (final DeserializationException e) {
-            BindingDOMMountPointServiceAdapter.LOG.error("Unable to translate mountPoint path {}. Omitting event.",
-                    path, e);
+            LOG.error("Unable to translate mountPoint path {}. Omitting event.", path, e);
         }
     }
 }
index 896d26ffc9729176c2173b575a1ef6a6d020d279..244ab51199bbc1ba10e4b6555f839b80a46882f6 100644 (file)
@@ -22,26 +22,26 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class BindingDOMMountPointServiceAdapter implements MountPointService {
-    public static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointServiceAdapter.class);
+    private static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointServiceAdapter.class);
 
     private final BindingToNormalizedNodeCodec codec;
     private final DOMMountPointService mountService;
     private final LoadingCache<DOMMountPoint, BindingMountPointAdapter> bindingMountpoints = CacheBuilder.newBuilder()
             .weakKeys().build(new CacheLoader<DOMMountPoint, BindingMountPointAdapter>() {
-
                 @Override
-                public BindingMountPointAdapter load(DOMMountPoint key) {
-                    return new BindingMountPointAdapter(codec,key);
+                public BindingMountPointAdapter load(final DOMMountPoint key) {
+                    return new BindingMountPointAdapter(codec, key);
                 }
             });
 
-    public BindingDOMMountPointServiceAdapter(DOMMountPointService mountService,BindingToNormalizedNodeCodec codec) {
+    public BindingDOMMountPointServiceAdapter(final DOMMountPointService mountService,
+            final BindingToNormalizedNodeCodec codec) {
         this.codec = codec;
         this.mountService = mountService;
     }
 
     @Override
-    public Optional<MountPoint> getMountPoint(InstanceIdentifier<?> mountPoint) {
+    public Optional<MountPoint> getMountPoint(final InstanceIdentifier<?> mountPoint) {
 
         YangInstanceIdentifier domPath = codec.toYangInstanceIdentifierBlocking(mountPoint);
         Optional<DOMMountPoint> domMount = mountService.getMountPoint(domPath);
@@ -52,8 +52,8 @@ public class BindingDOMMountPointServiceAdapter implements MountPointService {
     }
 
     @Override
-    public <T extends MountPointListener> ListenerRegistration<T> registerListener(InstanceIdentifier<?> path,
-            T listener) {
+    public <T extends MountPointListener> ListenerRegistration<T> registerListener(final InstanceIdentifier<?> path,
+            final T listener) {
         return new BindingDOMMountPointListenerAdapter<>(listener, codec, mountService);
     }
 }
index 5d9c1d61c32e899e85655944768e6bb876b6fb67..1bff714179c7ce9cb4e99d8b0cb965dcd7b90ba6 100644 (file)
@@ -20,6 +20,7 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.AbstractMap.SimpleEntry;
@@ -228,7 +229,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
             public void onComplete(final Throwable failure, final ActorRef actorRef) {
                 if (failure != null) {
                     LOG.warn("No local shard found for {} datastoreType {} - Cannot request leadership transfer to"
-                                    + " local shard.", shardName, failure);
+                            + " local shard.", shardName, dataStoreType, failure);
                     makeLeaderLocalAsk.failure(failure);
                 } else {
                     makeLeaderLocalAsk
@@ -672,9 +673,10 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         onMessageFailure(String.format("Failed to back up datastore to file %s", fileName), returnFuture, failure);
     }
 
+    @SuppressFBWarnings("SLF4J_SIGN_ONLY_FORMAT")
     private static <T> void onMessageFailure(final String msg, final SettableFuture<RpcResult<T>> returnFuture,
             final Throwable failure) {
-        LOG.error(msg, failure);
+        LOG.error("{}", msg, failure);
         returnFuture.set(ClusterAdminRpcService.<T>newFailedRpcResultBuilder(String.format("%s: %s", msg,
                 failure.getMessage())).build());
     }
index 6bd7a053b0bcc9480cd7ff42d3f67379a06ef805..c0e260ae66a9f2dcc6c73e254449da1167f403ff 100644 (file)
@@ -10,12 +10,14 @@ package org.opendaylight.controller.cluster.common.actor;
 
 import akka.actor.ActorRef;
 import akka.actor.UntypedActor;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.eclipse.jdt.annotation.NonNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public abstract class AbstractUntypedActor extends UntypedActor implements ExecuteInSelfActor {
     // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     @SuppressWarnings("checkstyle:MemberName")
     protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
index e9aaa65453c4b14fc40de1451a668a3091699fa9..5ee3c499881e5a54bc0d6a7e4858a24edad1604b 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.common.actor;
 
 import akka.actor.ActorRef;
 import akka.persistence.UntypedPersistentActor;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.eclipse.jdt.annotation.NonNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -17,6 +18,7 @@ import org.slf4j.LoggerFactory;
 public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor implements ExecuteInSelfActor {
 
     // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     @SuppressWarnings("checkstyle:MemberName")
     protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
index 14aedffd2ab59ecd866df71d66eaf6827adb2b28..9d1126a78309664fa5269f1737e3c36419564026 100644 (file)
@@ -24,7 +24,8 @@ import scala.concurrent.duration.Duration;
 
 public class ActorSystemProviderImpl implements ActorSystemProvider, AutoCloseable {
     private static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
-    static final Logger LOG = LoggerFactory.getLogger(ActorSystemProviderImpl.class);
+    private static final Logger LOG = LoggerFactory.getLogger(ActorSystemProviderImpl.class);
+
     private final ActorSystem actorSystem;
     private final ListenerRegistry<ActorSystemProviderListener> listeners = new ListenerRegistry<>();
 
index ee549d36118e8f026f1ed6e83c80b842fe5bbf50..b79a5ab88ec8af82e4ded27b937de4044f48e117 100644 (file)
@@ -129,7 +129,7 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
             LOG.debug("Invalidating backend information {}", staleInfo);
             flushCache(staleInfo.getShardName());
 
-            LOG.trace("Invalidated cache %s", staleInfo);
+            LOG.trace("Invalidated cache {}", staleInfo);
             backends.remove(cookie, existing);
         }
 
index 370484d9556f03d8eb677c20f321d9b5fdcb2458..9741a6bda49b1880caf44f112ee3dc6b72cd0851 100644 (file)
@@ -88,7 +88,7 @@ final class SimpleShardBackendResolver extends AbstractShardBackendResolver {
             synchronized (this) {
                 LOG.debug("Invalidating backend information {}", staleInfo);
                 flushCache(shardName);
-                LOG.trace("Invalidated cache %s", staleInfo);
+                LOG.trace("Invalidated cache {}", staleInfo);
                 state = null;
             }
         }
index 7c3bba9756043c0b4bad67ede6f272cb1c813111..db57f1aed51bb08ce23c73c4ba86a2639c9eb0b5 100644 (file)
@@ -19,7 +19,7 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 final class SingleClientHistory extends AbstractClientHistory {
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractClientHistory.class);
+    private static final Logger LOG = LoggerFactory.getLogger(SingleClientHistory.class);
 
     SingleClientHistory(final AbstractDataStoreClientBehavior client, final LocalHistoryIdentifier identifier) {
         super(client, identifier);
index fb3e968fe85c43bf933f6982c7342d8460ae3d90..b4cdb81c5aee7e9088e94d31cd62d35ed1260cb1 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorContext;
 import akka.actor.ActorRef;
 import akka.actor.Props;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
@@ -24,6 +25,7 @@ import org.slf4j.LoggerFactory;
  */
 @NotThreadSafe
 abstract class AbstractShardDataTreeNotificationPublisherActorProxy implements ShardDataTreeNotificationPublisher {
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     protected final Logger log = LoggerFactory.getLogger(getClass());
 
     private final ActorContext actorContext;
@@ -31,8 +33,8 @@ abstract class AbstractShardDataTreeNotificationPublisherActorProxy implements S
     private final String logContext;
     private ActorRef publisherActor;
 
-    protected AbstractShardDataTreeNotificationPublisherActorProxy(ActorContext actorContext, String actorName,
-            String logContext) {
+    protected AbstractShardDataTreeNotificationPublisherActorProxy(final ActorContext actorContext,
+            final String actorName, final String logContext) {
         this.actorContext = actorContext;
         this.actorName = actorName;
         this.logContext = logContext;
@@ -49,7 +51,7 @@ abstract class AbstractShardDataTreeNotificationPublisherActorProxy implements S
     }
 
     @Override
-    public void publishChanges(DataTreeCandidate candidate) {
+    public void publishChanges(final DataTreeCandidate candidate) {
         publisherActor().tell(new ShardDataTreeNotificationPublisherActor.PublishNotifications(candidate),
                 ActorRef.noSender());
     }
index 0ef49b6244c33a8a86fb7adec36106117c1f6175..200f766b5d3619da0bc1427145a1736ee94bcd77 100644 (file)
@@ -232,7 +232,7 @@ class CompositeDataTreeCohort {
 
         aggregateFuture.onComplete(new OnComplete<Iterable<Object>>() {
             @Override
-            public void onComplete(Throwable failure, Iterable<Object> results) {
+            public void onComplete(final Throwable failure, final Iterable<Object> results) {
                 callbackExecutor.execute(
                     () -> processResponses(failure, results, currentState, afterState, returnFuture));
             }
@@ -244,8 +244,8 @@ class CompositeDataTreeCohort {
     // FB issues violation for passing null to CompletableFuture#complete but it is valid and necessary when the
     // generic type is Void.
     @SuppressFBWarnings("NP_NONNULL_PARAM_VIOLATION")
-    private void processResponses(Throwable failure, Iterable<Object> results, State currentState, State afterState,
-            CompletableFuture<Void> resultFuture) {
+    private void processResponses(final Throwable failure, final Iterable<Object> results,
+            final State currentState, final State afterState, final CompletableFuture<Void> resultFuture) {
         if (failure != null) {
             successfulFromPrevious = Collections.emptyList();
             resultFuture.completeExceptionally(failure);
@@ -260,7 +260,7 @@ class CompositeDataTreeCohort {
             } else if (result instanceof Status.Failure) {
                 failed.add((Failure) result);
             } else {
-                LOG.warn("{}: unrecognized response {}, ignoring it", result);
+                LOG.warn("{}: unrecognized response {}, ignoring it", txId, result);
             }
         }
 
index 0a338428082e4a86d632817988c26336c143cb04..a57a799284921a49582bc857d9453189b77186a0 100644 (file)
@@ -76,7 +76,7 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
                             + "cannot be registered", logContext(), shardName, getInstance(), registeredPath);
                 } else if (failure != null) {
                     LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} "
-                            + "cannot be registered: {}", logContext(), shardName, getInstance(), registeredPath,
+                            + "cannot be registered", logContext(), shardName, getInstance(), registeredPath,
                             failure);
                 } else {
                     doRegistration(shard);
index cfe12a1bc65e117832887579b50e32ad2f3ccdef..e14db0fe6abf34bd222c58c75b495c6cb4b96fba 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
@@ -37,8 +36,8 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
     @GuardedBy("this")
     private ActorRef cohortRegistry;
 
-
-    DataTreeCohortRegistrationProxy(ActorContext actorContext, DOMDataTreeIdentifier subtree, C cohort) {
+    DataTreeCohortRegistrationProxy(final ActorContext actorContext, final DOMDataTreeIdentifier subtree,
+            final C cohort) {
         super(cohort);
         this.subtree = Preconditions.checkNotNull(subtree);
         this.actorContext = Preconditions.checkNotNull(actorContext);
@@ -46,8 +45,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
                 subtree.getRootIdentifier()).withDispatcher(actorContext.getNotificationDispatcherPath()));
     }
 
-
-    public void init(String shardName) {
+    public void init(final String shardName) {
         // FIXME: Add late binding to shard.
         Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
         findFuture.onComplete(new OnComplete<ActorRef>() {
@@ -58,7 +56,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
                             + "cannot be registered", shardName, getInstance(), subtree);
                 } else if (failure != null) {
                     LOG.error("Failed to find local shard {} - DataTreeChangeListener {} at path {} "
-                            + "cannot be registered: {}", shardName, getInstance(), subtree, failure);
+                            + "cannot be registered", shardName, getInstance(), subtree, failure);
                 } else {
                     performRegistration(shard);
                 }
@@ -66,7 +64,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
         }, actorContext.getClientDispatcher());
     }
 
-    private synchronized void performRegistration(ActorRef shard) {
+    private synchronized void performRegistration(final ActorRef shard) {
         if (isClosed()) {
             return;
         }
@@ -76,7 +74,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
         future.onComplete(new OnComplete<Object>() {
 
             @Override
-            public void onComplete(Throwable failure, Object val) {
+            public void onComplete(final Throwable failure, final Object val) {
                 if (failure != null) {
                     LOG.error("Unable to register {} as commit cohort", getInstance(), failure);
                 }
index 53d2730f86120ae7040f912425807bd73bd85458..9f9d169d1ac2227ce9263b1cf4196497d4d3307f 100644 (file)
@@ -13,6 +13,7 @@ import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.List;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.slf4j.Logger;
@@ -31,6 +32,8 @@ class DebugThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object>
     private final AbstractThreePhaseCommitCohort<?> delegate;
     private final Throwable debugContext;
     private final TransactionIdentifier transactionId;
+
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_FINAL")
     private Logger log = LOG;
 
     DebugThreePhaseCommitCohort(final TransactionIdentifier transactionId,
index 81b782d236f751a54195faed7b37af19adb1ee5c..5de977da13588eb2dbd8d688ba56ee423a8ea3b1 100644 (file)
@@ -108,7 +108,7 @@ final class FrontendClientMetadataBuilder implements Builder<FrontendClientMetad
 
         // XXX: do we need to account for cookies?
         purgedHistories.add(historyId.getHistoryId());
-        LOG.debug("{}: Purged history {}", historyId);
+        LOG.debug("{}: Purged history {}", shardName, historyId);
     }
 
     void onTransactionAborted(final TransactionIdentifier txId) {
index 2087da07596cb90f20637bfe1d14f3dc414efd2f..db879c07730e99ddfb78aaca3af96790f6077c61 100644 (file)
@@ -72,7 +72,7 @@ class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
         return actorContext.executeOperationAsync(leader, message, actorContext.getTransactionCommitOperationTimeout());
     }
 
-    Future<ActorSelection> initiateCoordinatedCommit(Optional<SortedSet<String>> participatingShardNames) {
+    Future<ActorSelection> initiateCoordinatedCommit(final Optional<SortedSet<String>> participatingShardNames) {
         final Future<Object> messageFuture = initiateCommit(false, participatingShardNames);
         final Future<ActorSelection> ret = TransactionReadyReplyMapper.transform(messageFuture, actorContext,
                 transaction.getIdentifier());
@@ -104,7 +104,8 @@ class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
                     LOG.debug("Transaction {} committed successfully", transaction.getIdentifier());
                     transactionCommitted(transaction);
                 } else {
-                    LOG.error("Transaction {} resulted in unhandled message type {}, aborting", message.getClass());
+                    LOG.error("Transaction {} resulted in unhandled message type {}, aborting",
+                        transaction.getIdentifier(), message.getClass());
                     transactionAborted(transaction);
                 }
             }
@@ -137,9 +138,9 @@ class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
         throw new UnsupportedOperationException();
     }
 
-    protected void transactionAborted(SnapshotBackedWriteTransaction<TransactionIdentifier> aborted) {
+    protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> aborted) {
     }
 
-    protected void transactionCommitted(SnapshotBackedWriteTransaction<TransactionIdentifier> comitted) {
+    protected void transactionCommitted(final SnapshotBackedWriteTransaction<TransactionIdentifier> comitted) {
     }
 }
index 03ed5ad0ea8c38fd4f3ad200285049feb165b36d..89a8c03c6a0272b6e38d04139cc5a85877aa5256 100644 (file)
@@ -38,14 +38,14 @@ final class NoOpTransactionContext extends AbstractTransactionContext {
 
     @Override
     public Future<Object> directCommit(final Boolean havePermit) {
-        LOG.debug("Tx {} directCommit called, failure: {}", getIdentifier(), failure);
+        LOG.debug("Tx {} directCommit called, failure", getIdentifier(), failure);
         return akka.dispatch.Futures.failed(failure);
     }
 
     @Override
     public Future<ActorSelection> readyTransaction(final Boolean havePermit,
             final Optional<SortedSet<String>> participatingShardNamess) {
-        LOG.debug("Tx {} readyTransaction called, failure: {}", getIdentifier(), failure);
+        LOG.debug("Tx {} readyTransaction called, failure", getIdentifier(), failure);
         return akka.dispatch.Futures.failed(failure);
     }
 
index 52057faa4ba747eedf8f449cac712303759c8f49..b7c170568847cc701cce421b63a79633f4653226 100644 (file)
@@ -240,8 +240,8 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
                 }
 
                 if (failure != null) {
-                    LOG.debug("Tx {} {} operation failed: {}", getIdentifier(), readCmd.getClass().getSimpleName(),
-                            failure);
+                    LOG.debug("Tx {} {} operation failed", getIdentifier(), readCmd.getClass().getSimpleName(),
+                        failure);
 
                     returnFuture.setException(new ReadFailedException("Error checking "
                         + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
index eebad9ce065c4fbb98e3f1a7c53ecb122c9cb5a0..403a96819f392726d7b24ef715827e1187f29841 100644 (file)
@@ -251,8 +251,8 @@ final class ShardCommitCoordinator {
 
             @Override
             public void onFailure(final Throwable failure) {
-                log.debug("{}: An exception occurred during canCommit for {}: {}", name,
-                        cohortEntry.getTransactionId(), failure);
+                log.debug("{}: An exception occurred during canCommit for {}", name, cohortEntry.getTransactionId(),
+                    failure);
 
                 cohortCache.remove(cohortEntry.getTransactionId());
                 cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
@@ -276,7 +276,7 @@ final class ShardCommitCoordinator {
             // between canCommit and ready and the entry was expired from the cache or it was aborted.
             IllegalStateException ex = new IllegalStateException(
                     String.format("%s: Cannot canCommit transaction %s - no cohort entry found", name, transactionID));
-            log.error(ex.getMessage());
+            log.error("{}: Inconsistency during transaction {} canCommit", name, transactionID, ex);
             sender.tell(new Failure(ex), shard.self());
             return;
         }
@@ -353,7 +353,7 @@ final class ShardCommitCoordinator {
             // or it was aborted.
             IllegalStateException ex = new IllegalStateException(
                     String.format("%s: Cannot commit transaction %s - no cohort entry found", name, transactionID));
-            log.error(ex.getMessage());
+            log.error("{}: Inconsistency during transaction {} commit", name, transactionID, ex);
             sender.tell(new Failure(ex), shard.self());
             return;
         }
index e1c12cd489c3cb6f14164380cbff005512c50431..c37703c3359b503bf1e65cab8e46d49fdd9b5353 100644 (file)
@@ -738,8 +738,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
                 // For debugging purposes, allow dumping of the modification. Coupled with the above
                 // precondition log, it should allow us to understand what went on.
-                LOG.debug("{}: Store Tx {}: modifications: {} tree: {}", cohort.getIdentifier(), modification,
-                        dataTree);
+                LOG.debug("{}: Store Tx {}: modifications: {} tree: {}", logContext, cohort.getIdentifier(),
+                    modification, dataTree);
                 cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
             } catch (Exception e) {
                 LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
@@ -873,7 +873,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         processNextPendingTransaction();
     }
 
-    private void insertEntry(Deque<CommitEntry> queue, CommitEntry entry, int atIndex) {
+    private void insertEntry(final Deque<CommitEntry> queue, final CommitEntry entry, final int atIndex) {
         if (atIndex == 0) {
             queue.addFirst(entry);
             return;
@@ -892,7 +892,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     private Collection<String> extractPrecedingShardNames(
-            java.util.Optional<SortedSet<String>> participatingShardNames) {
+            final java.util.Optional<SortedSet<String>> participatingShardNames) {
         return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
             set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
     }
index 5fc3c7adc262242a261aa0cb145a1ec5c9005e7e..7ece110d0143c4e1261ba511a9f73f0dbb8b3905 100644 (file)
@@ -119,7 +119,7 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
     @SuppressWarnings("checkstyle:IllegalCatch")
     public void applyRecoverySnapshot(final Snapshot.State snapshotState) {
         if (!(snapshotState instanceof ShardSnapshotState)) {
-            log.debug("{}: applyRecoverySnapshot ignoring snapshot: {}", snapshotState);
+            log.debug("{}: applyRecoverySnapshot ignoring snapshot: {}", shardName, snapshotState);
         }
 
         log.debug("{}: Applying recovered snapshot", shardName);
index d8c49a9e744807e5089da752f02abe69e13892c7..3ed3a48770054dc709d7076b9c79bd8c30de1bf1 100644 (file)
@@ -75,7 +75,7 @@ final class ShardSnapshotCohort implements RaftActorSnapshotCohort {
     @SuppressWarnings("checkstyle:IllegalCatch")
     public void applySnapshot(final Snapshot.State snapshotState) {
         if (!(snapshotState instanceof ShardSnapshotState)) {
-            log.debug("{}: applySnapshot ignoring snapshot: {}", snapshotState);
+            log.debug("{}: applySnapshot ignoring snapshot: {}", logId, snapshotState);
         }
 
         final ShardDataTreeSnapshot snapshot = ((ShardSnapshotState)snapshotState).getSnapshot();
index a78b9a2ef6294dbd04d896b4270e69c02fbd13ce..4d80d7fd8ab3fb3c5200f62aa4c589a6343ad2b7 100644 (file)
@@ -291,7 +291,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
 
                 @Override
                 public void onFailure(final Throwable failure) {
-                    LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId, operationName, failure);
+                    LOG.debug("Tx {}: a {} cohort path Future failed", transactionId, operationName, failure);
 
                     if (propagateException) {
                         returnFuture.setException(failure);
index d4b52e52524ca894d123693db11497241a48fc7b..ef8cc49582d04dd383fecb542a4fdc0d3ab84653 100644 (file)
@@ -54,7 +54,7 @@ final class TransactionContextCleanup extends FinalizablePhantomReference<Transa
 
     @Override
     public void finalizeReferent() {
-        LOG.trace("Cleaning up {} Tx actors {}", cleanup);
+        LOG.trace("Cleaning up {} Tx actors", cleanup);
 
         if (CACHE.remove(cleanup) != null) {
             cleanup.closeTransaction();
index 6f4c8dbbd29f5afa875825943cf213debd71a843..b7321afe5536f07f61136c6f3632df6b774d2ca3 100644 (file)
@@ -513,7 +513,7 @@ class EntityOwnershipShard extends Shard {
 
             } else {
                 LOG.debug("{}: Found entity {} but no other candidates - not clearing owner", persistenceId(),
-                        entityPath, newOwner);
+                        entityPath);
             }
         });
 
index fdd66c9f56a7c6a9b81b04aabd906575a909b311..d3fb58e9d15e66e5c25ca40ce60c546cfb6d866e 100644 (file)
@@ -305,7 +305,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             LOG.warn("{}: Failed to delete prior snapshots", persistenceId(),
                     ((DeleteSnapshotsFailure) message).cause());
         } else if (message instanceof DeleteSnapshotsSuccess) {
-            LOG.debug("{}: Successfully deleted prior snapshots", persistenceId(), message);
+            LOG.debug("{}: Successfully deleted prior snapshots", persistenceId());
         } else if (message instanceof RegisterRoleChangeListenerReply) {
             LOG.trace("{}: Received RegisterRoleChangeListenerReply", persistenceId());
         } else if (message instanceof ClusterEvent.MemberEvent) {
@@ -441,13 +441,14 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
                     shardReplicaOperationsInProgress.remove(shardName);
-                    String msg = String.format("RemoveServer request to leader %s for shard %s failed",
-                            primaryPath, shardName);
 
-                    LOG.debug("{}: {}", persistenceId(), msg, failure);
+                    LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
+                        shardName, failure);
 
                     // FAILURE
-                    sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
+                    sender.tell(new Status.Failure(new RuntimeException(
+                        String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
+                        failure)), self());
                 } else {
                     // SUCCESS
                     self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
@@ -481,13 +482,13 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
                     shardReplicaOperationsInProgress.remove(shardName);
-                    String msg = String.format("RemoveServer request to leader %s for shard %s failed",
-                            primaryPath, shardName);
-
-                    LOG.debug("{}: {}", persistenceId(), msg, failure);
+                    LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
+                        shardName, failure);
 
                     // FAILURE
-                    sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
+                    sender.tell(new Status.Failure(new RuntimeException(
+                        String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
+                        failure)), self());
                 } else {
                     // SUCCESS
                     self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
@@ -859,7 +860,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         try {
             shardId = ShardIdentifier.fromShardIdString(actorName);
         } catch (IllegalArgumentException e) {
-            LOG.debug("{}: ignoring actor {}", actorName, e);
+            LOG.debug("{}: ignoring actor {}", persistenceId, actorName, e);
             return;
         }
 
@@ -1320,9 +1321,9 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private boolean isShardReplicaOperationInProgress(final String shardName, final ActorRef sender) {
         if (shardReplicaOperationsInProgress.contains(shardName)) {
-            String msg = String.format("A shard replica operation for %s is already in progress", shardName);
-            LOG.debug("{}: {}", persistenceId(), msg);
-            sender.tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
+            LOG.debug("{}: A shard replica operation for {} is already in progress", persistenceId(), shardName);
+            sender.tell(new Status.Failure(new IllegalStateException(
+                String.format("A shard replica operation for %s is already in progress", shardName))), getSelf());
             return true;
         }
 
@@ -1338,10 +1339,11 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
         // Create the localShard
         if (schemaContext == null) {
-            String msg = String.format(
-                    "No SchemaContext is available in order to create a local shard instance for %s", shardName);
-            LOG.debug("{}: {}", persistenceId(), msg);
-            getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
+            LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
+                persistenceId(), shardName);
+            getSender().tell(new Status.Failure(new IllegalStateException(
+                "No SchemaContext is available in order to create a local shard instance for " + shardName)),
+                getSelf());
             return;
         }
 
@@ -1370,18 +1372,19 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
         // verify the shard with the specified name is present in the cluster configuration
         if (!this.configuration.isShardConfigured(shardName)) {
-            String msg = String.format("No module configuration exists for shard %s", shardName);
-            LOG.debug("{}: {}", persistenceId(), msg);
-            getSender().tell(new Status.Failure(new IllegalArgumentException(msg)), getSelf());
+            LOG.debug("{}: No module configuration exists for shard {}", persistenceId(), shardName);
+            getSender().tell(new Status.Failure(new IllegalArgumentException(
+                "No module configuration exists for shard " + shardName)), getSelf());
             return;
         }
 
         // Create the localShard
         if (schemaContext == null) {
-            String msg = String.format(
-                  "No SchemaContext is available in order to create a local shard instance for %s", shardName);
-            LOG.debug("{}: {}", persistenceId(), msg);
-            getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
+            LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
+                persistenceId(), shardName);
+            getSender().tell(new Status.Failure(new IllegalStateException(
+                "No SchemaContext is available in order to create a local shard instance for " + shardName)),
+                getSelf());
             return;
         }
 
@@ -1404,9 +1407,9 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
     }
 
     private void sendLocalReplicaAlreadyExistsReply(final String shardName, final ActorRef sender) {
-        String msg = String.format("Local shard %s already exists", shardName);
-        LOG.debug("{}: {}", persistenceId(), msg);
-        sender.tell(new Status.Failure(new AlreadyExistsException(msg)), getSelf());
+        LOG.debug("{}: Local shard {} already exists", persistenceId(), shardName);
+        sender.tell(new Status.Failure(new AlreadyExistsException(
+            String.format("Local shard %s already exists", shardName))), getSelf());
     }
 
     private void addPrefixShard(final String shardName, final YangInstanceIdentifier shardPrefix,
@@ -1758,15 +1761,16 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                         getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
                                 sender);
                     } else if (response instanceof LocalShardNotFound) {
-                        String msg = String.format("Local shard %s does not exist", shardName);
-                        LOG.debug("{}: {}", persistenceId, msg);
-                        sender.tell(new Status.Failure(new IllegalArgumentException(msg)), self());
+                        LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
+                        sender.tell(new Status.Failure(new IllegalArgumentException(
+                            String.format("Local shard %s does not exist", shardName))), self());
                     } else {
-                        String msg = String.format("Failed to find local shard %s: received response: %s",
-                                shardName, response);
-                        LOG.debug("{}: {}", persistenceId, msg);
-                        sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
-                                new RuntimeException(msg)), self());
+                        LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
+                            response);
+                        sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
+                                : new RuntimeException(
+                                    String.format("Failed to find local shard %s: received response: %s", shardName,
+                                        response))), self());
                     }
                 }
             }
@@ -1795,10 +1799,11 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             public void onComplete(final Throwable failure, final Object response) {
                 shardReplicaOperationsInProgress.remove(shardName);
                 if (failure != null) {
-                    String msg = String.format("ChangeServersVotingStatus request to local shard %s failed",
-                            shardActorRef.path());
-                    LOG.debug("{}: {}", persistenceId(), msg, failure);
-                    sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
+                    LOG.debug("{}: ChangeServersVotingStatus request to local shard {} failed", persistenceId(),
+                        shardActorRef.path(), failure);
+                    sender.tell(new Status.Failure(new RuntimeException(
+                        String.format("ChangeServersVotingStatus request to local shard %s failed",
+                            shardActorRef.path()), failure)), self());
                 } else {
                     LOG.debug("{}: Received {} from local shard {}", persistenceId(), response, shardActorRef.path());
 
@@ -1960,11 +1965,11 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
         @Override
         public void onUnknownResponse(final Object response) {
-            String msg = String.format("Failed to find leader for shard %s: received response: %s",
-                    shardName, response);
-            LOG.debug("{}: {}", persistenceId, msg);
-            targetActor.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
-                    new RuntimeException(msg)), shardManagerActor);
+            LOG.debug("{}: Failed to find leader for shard {}: received response: {}", persistenceId, shardName,
+                response);
+            targetActor.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
+                    : new RuntimeException(String.format("Failed to find leader for shard %s: received response: %s",
+                        shardName, response))), shardManagerActor);
         }
     }
 
index 21af2c49697ed65f58458816b17c467f708e703d..ed384fd2c709407e993a6314b3d37ecc2feee2ec 100644 (file)
@@ -60,12 +60,13 @@ final class ShardManagerGetSnapshotReplyActor extends UntypedAbstractActor {
             params.replyToActor.tell(message, getSelf());
             getSelf().tell(PoisonPill.getInstance(), getSelf());
         } else if (message instanceof ReceiveTimeout) {
-            String msg = String.format(
-                    "Timed out after %s ms while waiting for snapshot replies from %d shard(s). %d shard(s) %s "
-                    + "did not respond.", params.receiveTimeout.toMillis(), params.shardNames.size(),
-                    remainingShardNames.size(), remainingShardNames);
-            LOG.warn("{}: {}", params.id, msg);
-            params.replyToActor.tell(new Failure(new TimeoutException(msg)), getSelf());
+            LOG.warn("{}: Timed out after {} ms while waiting for snapshot replies from {} shard(s). "
+                + "{} shard(s) {} did not respond", params.id, params.receiveTimeout.toMillis(),
+                params.shardNames.size(), remainingShardNames.size(), remainingShardNames);
+            params.replyToActor.tell(new Failure(new TimeoutException(String.format(
+                "Timed out after %s ms while waiting for snapshot replies from %d shard(s). %d shard(s) %s "
+                + "did not respond.", params.receiveTimeout.toMillis(), params.shardNames.size(),
+                remainingShardNames.size(), remainingShardNames))), getSelf());
             getSelf().tell(PoisonPill.getInstance(), getSelf());
         }
     }
index 2a47cae8b312a560ae0fe4ecc81d1216c91165c0..bba4d6140e1fb697769a7d32e3a5ff0ea07ba153 100644 (file)
@@ -75,7 +75,7 @@ public class ActorContext {
     private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
                                                               new Mapper<Throwable, Throwable>() {
         @Override
-        public Throwable apply(Throwable failure) {
+        public Throwable apply(final Throwable failure) {
             Throwable actualFailure = failure;
             if (failure instanceof AskTimeoutException) {
                 // A timeout exception most likely means the shard isn't initialized.
@@ -115,15 +115,15 @@ public class ActorContext {
     private final PrimaryShardInfoFutureCache primaryShardInfoCache;
     private final ShardStrategyFactory shardStrategyFactory;
 
-    public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
-            ClusterWrapper clusterWrapper, Configuration configuration) {
+    public ActorContext(final ActorSystem actorSystem, final ActorRef shardManager,
+            final ClusterWrapper clusterWrapper, final Configuration configuration) {
         this(actorSystem, shardManager, clusterWrapper, configuration,
                 DatastoreContext.newBuilder().build(), new PrimaryShardInfoFutureCache());
     }
 
-    public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
-            ClusterWrapper clusterWrapper, Configuration configuration,
-            DatastoreContext datastoreContext, PrimaryShardInfoFutureCache primaryShardInfoCache) {
+    public ActorContext(final ActorSystem actorSystem, final ActorRef shardManager,
+            final ClusterWrapper clusterWrapper, final Configuration configuration,
+            final DatastoreContext datastoreContext, final PrimaryShardInfoFutureCache primaryShardInfoCache) {
         this.actorSystem = actorSystem;
         this.shardManager = shardManager;
         this.clusterWrapper = clusterWrapper;
@@ -171,15 +171,15 @@ public class ActorContext {
         return shardManager;
     }
 
-    public ActorSelection actorSelection(String actorPath) {
+    public ActorSelection actorSelection(final String actorPath) {
         return actorSystem.actorSelection(actorPath);
     }
 
-    public ActorSelection actorSelection(ActorPath actorPath) {
+    public ActorSelection actorSelection(final ActorPath actorPath) {
         return actorSystem.actorSelection(actorPath);
     }
 
-    public void setSchemaContext(SchemaContext schemaContext) {
+    public void setSchemaContext(final SchemaContext schemaContext) {
         this.schemaContext = schemaContext;
 
         if (shardManager != null) {
@@ -187,7 +187,7 @@ public class ActorContext {
         }
     }
 
-    public void setDatastoreContext(DatastoreContextFactory contextFactory) {
+    public void setDatastoreContext(final DatastoreContextFactory contextFactory) {
         this.datastoreContext = contextFactory.getBaseDatastoreContext();
         setCachedProperties();
 
@@ -219,7 +219,7 @@ public class ActorContext {
 
         return future.transform(new Mapper<Object, PrimaryShardInfo>() {
             @Override
-            public PrimaryShardInfo checkedApply(Object response) throws UnknownMessageException {
+            public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
                 if (response instanceof RemotePrimaryShardFound) {
                     LOG.debug("findPrimaryShardAsync received: {}", response);
                     RemotePrimaryShardFound found = (RemotePrimaryShardFound)response;
@@ -243,8 +243,8 @@ public class ActorContext {
         }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
     }
 
-    private PrimaryShardInfo onPrimaryShardFound(String shardName, String primaryActorPath,
-            short primaryVersion, DataTree localShardDataTree) {
+    private PrimaryShardInfo onPrimaryShardFound(final String shardName, final String primaryActorPath,
+            final short primaryVersion, final DataTree localShardDataTree) {
         ActorSelection actorSelection = actorSystem.actorSelection(primaryActorPath);
         PrimaryShardInfo info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
             new PrimaryShardInfo(actorSelection, primaryVersion, localShardDataTree);
@@ -259,7 +259,7 @@ public class ActorContext {
      * @return a reference to a local shard actor which represents the shard
      *         specified by the shardName
      */
-    public Optional<ActorRef> findLocalShard(String shardName) {
+    public Optional<ActorRef> findLocalShard(final String shardName) {
         Object result = executeOperation(shardManager, new FindLocalShard(shardName, false));
 
         if (result instanceof LocalShardFound) {
@@ -283,7 +283,7 @@ public class ActorContext {
 
         return future.map(new Mapper<Object, ActorRef>() {
             @Override
-            public ActorRef checkedApply(Object response) throws Throwable {
+            public ActorRef checkedApply(final Object response) throws Throwable {
                 if (response instanceof LocalShardFound) {
                     LocalShardFound found = (LocalShardFound)response;
                     LOG.debug("Local shard found {}", found.getPath());
@@ -309,7 +309,7 @@ public class ActorContext {
      * @return The response of the operation
      */
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public Object executeOperation(ActorRef actor, Object message) {
+    public Object executeOperation(final ActorRef actor, final Object message) {
         Future<Object> future = executeOperationAsync(actor, message, operationTimeout);
 
         try {
@@ -328,7 +328,7 @@ public class ActorContext {
      * @return the response message
      */
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public Object executeOperation(ActorSelection actor, Object message) {
+    public Object executeOperation(final ActorSelection actor, final Object message) {
         Future<Object> future = executeOperationAsync(actor, message);
 
         try {
@@ -339,7 +339,7 @@ public class ActorContext {
         }
     }
 
-    public Future<Object> executeOperationAsync(ActorRef actor, Object message, Timeout timeout) {
+    public Future<Object> executeOperationAsync(final ActorRef actor, final Object message, final Timeout timeout) {
         Preconditions.checkArgument(actor != null, "actor must not be null");
         Preconditions.checkArgument(message != null, "message must not be null");
 
@@ -355,8 +355,8 @@ public class ActorContext {
      * @param timeout the operation timeout
      * @return a Future containing the eventual result
      */
-    public Future<Object> executeOperationAsync(ActorSelection actor, Object message,
-            Timeout timeout) {
+    public Future<Object> executeOperationAsync(final ActorSelection actor, final Object message,
+            final Timeout timeout) {
         Preconditions.checkArgument(actor != null, "actor must not be null");
         Preconditions.checkArgument(message != null, "message must not be null");
 
@@ -372,7 +372,7 @@ public class ActorContext {
      * @param message the message to send
      * @return a Future containing the eventual result
      */
-    public Future<Object> executeOperationAsync(ActorSelection actor, Object message) {
+    public Future<Object> executeOperationAsync(final ActorSelection actor, final Object message) {
         return executeOperationAsync(actor, message, operationTimeout);
     }
 
@@ -383,7 +383,7 @@ public class ActorContext {
      * @param actor the ActorSelection
      * @param message the message to send
      */
-    public void sendOperationAsync(ActorSelection actor, Object message) {
+    public void sendOperationAsync(final ActorSelection actor, final Object message) {
         Preconditions.checkArgument(actor != null, "actor must not be null");
         Preconditions.checkArgument(message != null, "message must not be null");
 
@@ -413,16 +413,16 @@ public class ActorContext {
     /**
      * Send the message to each and every shard.
      */
-    public void broadcast(final Function<Short, Object> messageSupplier, Class<?> messageClass) {
+    public void broadcast(final Function<Short, Object> messageSupplier, final Class<?> messageClass) {
         for (final String shardName : configuration.getAllShardNames()) {
 
             Future<PrimaryShardInfo> primaryFuture = findPrimaryShardAsync(shardName);
             primaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
                 @Override
-                public void onComplete(Throwable failure, PrimaryShardInfo primaryShardInfo) {
+                public void onComplete(final Throwable failure, final PrimaryShardInfo primaryShardInfo) {
                     if (failure != null) {
-                        LOG.warn("broadcast failed to send message {} to shard {}:  {}",
-                            messageClass.getSimpleName(), shardName, failure);
+                        LOG.warn("broadcast failed to send message {} to shard {}", messageClass.getSimpleName(),
+                            shardName, failure);
                     } else {
                         Object message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
                         primaryShardInfo.getPrimaryShardActor().tell(message, ActorRef.noSender());
@@ -440,7 +440,7 @@ public class ActorContext {
         return operationTimeout;
     }
 
-    public boolean isPathLocal(String path) {
+    public boolean isPathLocal(final String path) {
         if (Strings.isNullOrEmpty(path)) {
             return false;
         }
@@ -474,11 +474,11 @@ public class ActorContext {
      * @param operationName the name of the operation
      * @return the Timer instance
      */
-    public Timer getOperationTimer(String operationName) {
+    public Timer getOperationTimer(final String operationName) {
         return getOperationTimer(datastoreContext.getDataStoreName(), operationName);
     }
 
-    public Timer getOperationTimer(String dataStoreType, String operationName) {
+    public Timer getOperationTimer(final String dataStoreType, final String operationName) {
         final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
                 operationName, METRIC_RATE);
         return metricRegistry.timer(rate);
@@ -540,11 +540,11 @@ public class ActorContext {
         return shardStrategyFactory;
     }
 
-    protected Future<Object> doAsk(ActorRef actorRef, Object message, Timeout timeout) {
+    protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
         return ask(actorRef, message, timeout);
     }
 
-    protected Future<Object> doAsk(ActorSelection actorRef, Object message, Timeout timeout) {
+    protected Future<Object> doAsk(final ActorSelection actorRef, final Object message, final Timeout timeout) {
         return ask(actorRef, message, timeout);
     }
 
index db6192b348e12775f5aae0c42606aa84f7741d14..c170944940234ab5da1e9e02a5a21912a36d471c 100644 (file)
@@ -73,7 +73,7 @@ public class SerializedDOMDataBroker extends AbstractDOMDataBroker {
             commitFuture = executor.submit(new CommitCoordinationTask<>(transaction, cohorts, commitStatsTracker,
                     futureValueSupplier));
         } catch (RejectedExecutionException e) {
-            LOG.error("The commit executor's queue is full - submit task was rejected. \n" + executor, e);
+            LOG.error("The commit executor {} queue is full - submit task was rejected. \n", executor, e);
             commitFuture = Futures.immediateFailedFuture(new TransactionCommitFailedException(
                     "Could not submit the commit task - the commit queue capacity has been exceeded.", e));
         }
index 23b73f2094524075eb64a113ed9d9f4e7063c41a..4d3a66c00e4ba2eb25d68a27520c4ad83338550c 100644 (file)
@@ -90,7 +90,7 @@ final class RpcInvoker extends AbstractUntypedActor {
             public void onFailure(final Throwable failure) {
                 LOG.debug("Failed to execute RPC {}", msg.getRpc(), failure);
                 LOG.error("Failed to execute RPC {} due to {}. More details are available on DEBUG level.",
-                    msg.getRpc(), Throwables.getRootCause(failure));
+                    msg.getRpc(), Throwables.getRootCause(failure).getMessage());
                 sender.tell(new akka.actor.Status.Failure(failure), self);
             }
         }, MoreExecutors.directExecutor());
index a0f52d1b15740367b427f05f4f79477f294acf24..9a84b91300c7681515f8dd85a94475aab1fb1364 100644 (file)
@@ -377,7 +377,7 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
             versions.remove(addr);
             final Bucket<T> bucket = remoteBuckets.remove(addr);
             if (bucket != null) {
-                LOG.debug("Source actor dead, removing bucket {} from ", bucket, addr);
+                LOG.debug("Source actor dead, removing bucket {} from {}", bucket, addr);
                 onBucketRemoved(addr, bucket);
             }
         }
index 22c18f085c433ab2eb006e79a60990ea0603306c..87adef3d9cedf30f62ce9de22653ea2a643cde3b 100644 (file)
@@ -5,11 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc.registry.mbeans;
 
 import akka.actor.Address;
 import akka.util.Timeout;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -25,9 +25,9 @@ import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 
-
 public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements RemoteRpcRegistryMXBean {
 
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     protected final Logger log = LoggerFactory.getLogger(getClass());
 
     private static final String LOCAL_CONSTANT = "local";
@@ -39,7 +39,7 @@ public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements Remot
     private final BucketStoreAccess rpcRegistryAccess;
     private final Timeout timeout;
 
-    public RemoteRpcRegistryMXBeanImpl(final BucketStoreAccess rpcRegistryAccess, Timeout timeout) {
+    public RemoteRpcRegistryMXBeanImpl(final BucketStoreAccess rpcRegistryAccess, final Timeout timeout) {
         super("RemoteRpcRegistry", "RemoteRpcBroker", null);
         this.rpcRegistryAccess = rpcRegistryAccess;
         this.timeout = timeout;
index d76d0c7aba423bc4c626d9f8cf794b5052fdf72e..60bfc5644fa1e53d13229c52e83da884067ccec1 100644 (file)
@@ -60,7 +60,7 @@ public class PeopleCarListener implements CarPurchaseListener {
 
             @Override
             public void onFailure(final Throwable ex) {
-                LOG.error(String.format("Failed to add car-person entry: [%s]", carPerson), ex);
+                LOG.error("Failed to add car-person entry: [{}]", carPerson, ex);
             }
         }, MoreExecutors.directExecutor());
     }
index 13349d95e325b3b1908951043d0c1912700af59a..a6f154bfcde7282b7b0bdb6c0d5a7166bdbd3238 100644 (file)
@@ -13,6 +13,7 @@ import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Collection;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -76,6 +77,7 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
+@SuppressFBWarnings("SLF4J_ILLEGAL_PASSED_CLASS")
 public class CarProvider implements CarService {
     private static final Logger LOG_PURCHASE_CAR = LoggerFactory.getLogger(PurchaseCarProvider.class);
 
index 2cd5d2b7bbdf77ae51b4491f49d30e62d54bb43e..5e6e53143652116bdc86ba624d883eecb4ae2145 100644 (file)
@@ -75,7 +75,7 @@ public class PeopleProvider implements PeopleService, AutoCloseable {
 
             @Override
             public void onFailure(final Throwable ex) {
-                LOG.error(String.format("RPC addPerson : person addition failed [%s]", person), ex);
+                LOG.error("RPC addPerson : person addition failed [{}]", person, ex);
                 futureResult.set(RpcResultBuilder.<AddPersonOutput>failed()
                         .withError(RpcError.ErrorType.APPLICATION, ex.getMessage()).build());
             }
index 3e644393f163975fb13ab220da40b2e80ab00fda..3fdc446542a2ab811e25c823d35ba8a814a0d103 100644 (file)
@@ -47,7 +47,7 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
 
         changes.forEach(change -> {
             if (change.getRootNode().getDataAfter().isPresent()) {
-                LOG.trace("Received change, data before: {}, data after: ",
+                LOG.trace("Received change, data before: {}, data after: {}",
                         change.getRootNode().getDataBefore().isPresent()
                                 ? change.getRootNode().getDataBefore().get() : "",
                         change.getRootNode().getDataAfter().get());
index c1c43f767601b07b7c42cea3cc09027807abc154..b9b9b3d678901190e45d36b5d26221d99165fa05 100644 (file)
@@ -120,13 +120,12 @@ public class KitchenServiceImpl extends AbstractMXBean
             if (result.isSuccessful()) {
                 LOG.info("makeBreakfast succeeded");
             } else {
-                LOG.warn("makeBreakfast failed: " + result.getErrors());
+                LOG.warn("makeBreakfast failed: {}", result.getErrors());
             }
 
             return result.isSuccessful();
-
         } catch (InterruptedException | ExecutionException e) {
-            LOG.warn("An error occurred while maing breakfast: " + e);
+            LOG.warn("An error occurred while maing breakfast", e);
         }
 
         return Boolean.FALSE;
@@ -146,7 +145,7 @@ public class KitchenServiceImpl extends AbstractMXBean
      */
     @Override
     public void onToasterRestocked(final ToasterRestocked notification) {
-        LOG.info("ToasterRestocked notification - amountOfBread: " + notification.getAmountOfBread());
+        LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
         toasterOutOfBread = false;
     }
 }
index 0fcd474d9c0e631185a37e28836f40b8e66dd822..fc1e380a4c16bf324ee85e611dc510d602d89073 100644 (file)
@@ -215,7 +215,7 @@ public class OpendaylightToaster extends AbstractMXBean
      */
     @Override
     public ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final MakeToastInput input) {
-        LOG.info("makeToast: " + input);
+        LOG.info("makeToast: {}", input);
 
         final SettableFuture<RpcResult<MakeToastOutput>> futureResult = SettableFuture.create();
 
@@ -322,7 +322,7 @@ public class OpendaylightToaster extends AbstractMXBean
      */
     @Override
     public ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
-        LOG.info("restockToaster: " + input);
+        LOG.info("restockToaster: {}", input);
 
         amountOfBreadInStock.set(input.getAmountOfBreadToStock());