Bug 2669: Use slf4j Logger instead of akka LoggingAdapter
author tpantelis <tpanteli@brocade.com>
Sun, 1 Feb 2015 21:52:59 +0000 (16:52 -0500)
committer tpantelis <tpanteli@brocade.com>
Mon, 9 Feb 2015 02:27:33 +0000 (21:27 -0500)
Change-Id: I8ca8d4979448cb158f6fb2dd89124a3b4e6e39e8
Signed-off-by: tpantelis <tpanteli@brocade.com>
23 files changed:
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ClientActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/BackwardsCompatibleThreePhaseCommitCohort.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistry.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java

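Every file below follows the same two-step pattern, sketched here for reference; SomeActor is illustrative and not one of the 23 files touched. The logger field switches from akka's LoggingAdapter, obtained through Logging.getLogger(getContext().system(), this), to an slf4j Logger obtained from LoggerFactory, and any call that logs an exception moves the Throwable from the first argument to the last, which is where slf4j expects it.

import akka.actor.UntypedActor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch of the migration pattern; not part of this change.
public class SomeActor extends UntypedActor {

    // Before: protected final LoggingAdapter LOG =
    //             Logging.getLogger(getContext().system(), this);
    protected final Logger LOG = LoggerFactory.getLogger(getClass());

    @Override
    public void onReceive(Object message) throws Exception {
        try {
            unhandled(message);
        } catch (Exception e) {
            // Before (LoggingAdapter): LOG.error(e, "{}: failed handling {}", "some-id", message);
            // After (slf4j): the Throwable goes last and is printed with its stack trace.
            LOG.error("{}: failed handling {}", "some-id", message, e);
        }
    }
}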
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ClientActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ClientActor.java
index 8022e721579e0d66c8fff33d2955f6197f6607fb..fe25c75ae243268ae9a6602354f8f81af0804a7f 100644
@@ -11,14 +11,13 @@ package org.opendaylight.controller.cluster.example;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
 import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ClientActor extends UntypedActor {
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
+    protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
     private final ActorRef target;
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
index 9aff86ba2b09572a44200ac2095331d03c687a66..c5ae4c41b2f4822a04ca9da300171ae2e618d52e 100644
@@ -125,7 +125,7 @@ public class ExampleActor extends RaftActor {
         try {
             bs = fromObject(state);
         } catch (Exception e) {
-            LOG.error(e, "Exception in creating snapshot");
+            LOG.error("Exception in creating snapshot", e);
         }
         getSelf().tell(new CaptureSnapshotReply(bs.toByteArray()), null);
     }
@@ -135,7 +135,7 @@ public class ExampleActor extends RaftActor {
         try {
             state.putAll((HashMap) toObject(snapshot));
         } catch (Exception e) {
-           LOG.error(e, "Exception in applying snapshot");
+           LOG.error("Exception in applying snapshot", e);
         }
         if(LOG.isDebugEnabled()) {
             LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
index 766b80e73dd12c890df3ed493e397a7cd144aab4..3dc6ae469a932474effca186c956a7a6f4ec8631 100644
@@ -10,8 +10,6 @@ package org.opendaylight.controller.cluster.raft;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import akka.japi.Procedure;
 import akka.persistence.RecoveryCompleted;
 import akka.persistence.SaveSnapshotFailure;
@@ -43,6 +41,8 @@ import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * RaftActor encapsulates a state machine that needs to be kept synchronized
@@ -85,8 +85,7 @@ import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntries
  * </ul>
  */
 public abstract class RaftActor extends AbstractUntypedPersistentActor {
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
+    protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
     /**
      * The current state determines the current behavior of a RaftActor
@@ -338,8 +337,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         } else if (message instanceof SaveSnapshotFailure) {
             SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
 
-            LOG.error(saveSnapshotFailure.cause(), "{}: SaveSnapshotFailure received for snapshot Cause:",
-                    persistenceId());
+            LOG.error("{}: SaveSnapshotFailure received for snapshot Cause:",
+                    persistenceId(), saveSnapshotFailure.cause());
 
             context.getReplicatedLog().snapshotRollback();
 
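The reordering in the hunk above works because of a detail of slf4j's parameterized logging: when the last argument is a Throwable and the format string has no placeholder left for it, slf4j logs it as the exception (message plus stack trace) rather than formatting it into the message. A minimal sketch, assuming an slf4j binding is on the classpath; the shard id is made up for illustration:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TrailingThrowableExample {
    private static final Logger LOG = LoggerFactory.getLogger(TrailingThrowableExample.class);

    public static void main(String[] args) {
        Throwable cause = new IllegalStateException("simulated snapshot failure");
        // One {} placeholder, two trailing arguments: "member-1-shard-inventory" fills the
        // placeholder and the leftover Throwable is logged with its stack trace.
        LOG.error("{}: SaveSnapshotFailure received for snapshot Cause:", "member-1-shard-inventory", cause);
    }
}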
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java
index 0e1f20b24681ed6a0cd0644b513251114f225745..9d391a1588ba31c575125572db2ef642e3e14e58 100644
@@ -12,9 +12,8 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
-import akka.event.LoggingAdapter;
-
 import java.util.Map;
+import org.slf4j.Logger;
 
 /**
  * The RaftActorContext contains that portion of the RaftActors state that
@@ -106,7 +105,7 @@ public interface RaftActorContext {
      *
      * @return
      */
-    LoggingAdapter getLogger();
+    Logger getLogger();
 
     /**
      * Get a mapping of peerId's to their addresses
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
index 5438fe7c4840ed4c91f17c98ebc1022ace720d7c..b71b3be3522e1082e46f56f681a0ee49440ab79d 100644
@@ -8,15 +8,14 @@
 
 package org.opendaylight.controller.cluster.raft;
 
+import static com.google.common.base.Preconditions.checkState;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.actor.UntypedActorContext;
-import akka.event.LoggingAdapter;
 import java.util.Map;
-
-import static com.google.common.base.Preconditions.checkState;
+import org.slf4j.Logger;
 
 public class RaftActorContextImpl implements RaftActorContext {
 
@@ -36,7 +35,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     private final Map<String, String> peerAddresses;
 
-    private final LoggingAdapter LOG;
+    private final Logger LOG;
 
     private final ConfigParams configParams;
 
@@ -47,7 +46,7 @@ public class RaftActorContextImpl implements RaftActorContext {
         ElectionTerm termInformation, long commitIndex,
         long lastApplied, ReplicatedLog replicatedLog,
         Map<String, String> peerAddresses, ConfigParams configParams,
-        LoggingAdapter logger) {
+        Logger logger) {
         this.actor = actor;
         this.context = context;
         this.id = id;
@@ -115,7 +114,7 @@ public class RaftActorContextImpl implements RaftActorContext {
         return context.system();
     }
 
-    @Override public LoggingAdapter getLogger() {
+    @Override public Logger getLogger() {
         return this.LOG;
     }
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
index afd68847ed55be67238bae830db9983aac53b78c..0927b0a5313dd4fc32393cd1b51f44c5007dcb7b 100644
@@ -637,7 +637,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                         followerToSnapshot.getTotalChunks());
             }
         } catch (IOException e) {
-            LOG.error(e, "{}: InstallSnapshot failed for Leader.", context.getId());
+            LOG.error("{}: InstallSnapshot failed for Leader.", context.getId(), e);
         }
     }
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
index 99824b0bb4e6235a56d12cb719384fee85fd309a..075b2873e45332364c09aee83c49e6b23e40780c 100644
@@ -10,7 +10,6 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 
 import akka.actor.ActorRef;
 import akka.actor.Cancellable;
-import akka.event.LoggingAdapter;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
@@ -24,6 +23,7 @@ import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.slf4j.Logger;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -46,7 +46,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     /**
      *
      */
-    protected final LoggingAdapter LOG;
+    protected final Logger LOG;
 
     /**
      *
@@ -349,7 +349,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
             } else {
                 //if one index is not present in the log, no point in looping
                 // around as the rest wont be present either
-                LOG.warning(
+                LOG.warn(
                         "{}: Missing index {} from log. Cannot apply state. Ignoring {} to {}",
                         context.getId(), i, i, index);
                 break;
@@ -394,7 +394,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         try {
             close();
         } catch (Exception e) {
-            LOG.error(e, "{}: Failed to close behavior : {}", context.getId(), this.state());
+            LOG.error("{}: Failed to close behavior : {}", context.getId(), this.state(), e);
         }
 
         return behavior;
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
index 410b3c266c87066cef3828732f62ca154a580df3..8a0788702d81f3dfe04d713bcc99e527273c82d2 100644
@@ -342,7 +342,7 @@ public class Follower extends AbstractRaftActorBehavior {
             snapshotTracker = null;
 
         } catch (Exception e){
-            LOG.error(e, "{}: Exception in InstallSnapshot of follower", context.getId());
+            LOG.error("{}: Exception in InstallSnapshot of follower", context.getId(), e);
             //send reply with success as false. The chunk will be sent again on failure
             sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
                     installSnapshot.getChunkIndex(), false), actor());
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java
index 26fbde07117e38e8b06096108b471401a503f426..d26837f1808306edbabb159ca7f0c13084836f2d 100644
@@ -8,22 +8,22 @@
 
 package org.opendaylight.controller.cluster.raft.behaviors;
 
-import akka.event.LoggingAdapter;
 import com.google.common.base.Optional;
 import com.google.protobuf.ByteString;
+import org.slf4j.Logger;
 
 /**
  * SnapshotTracker does house keeping for a snapshot that is being installed in chunks on the Follower
  */
 public class SnapshotTracker {
-    private final LoggingAdapter LOG;
+    private final Logger LOG;
     private final int totalChunks;
     private ByteString collectedChunks = ByteString.EMPTY;
     private int lastChunkIndex = AbstractLeader.FIRST_CHUNK_INDEX - 1;
     private boolean sealed = false;
     private int lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
 
-    SnapshotTracker(LoggingAdapter LOG, int totalChunks){
+    SnapshotTracker(Logger LOG, int totalChunks){
         this.LOG = LOG;
         this.totalChunks = totalChunks;
     }
@@ -77,6 +77,8 @@ public class SnapshotTracker {
     }
 
     public static class InvalidChunkException extends Exception {
+        private static final long serialVersionUID = 1L;
+
         InvalidChunkException(String message){
             super(message);
         }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java
index 9d3e5dcb12da55ee474f27e055487ff690321def..c490cb21e48a4885458e44d5e0ee82653b6badf9 100644
@@ -12,8 +12,6 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.GeneratedMessage;
 import java.io.Serializable;
@@ -22,6 +20,8 @@ import java.util.Map;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
 import org.opendaylight.controller.protobuff.messages.cluster.raft.test.MockPayloadMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class MockRaftActorContext implements RaftActorContext {
 
@@ -144,8 +144,8 @@ public class MockRaftActorContext implements RaftActorContext {
         return this.system;
     }
 
-    @Override public LoggingAdapter getLogger() {
-        return Logging.getLogger(system, this);
+    @Override public Logger getLogger() {
+        return LoggerFactory.getLogger(getClass());
     }
 
     @Override public Map<String, String> getPeerAddresses() {
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java
index 1b3a8f5fb5077a57f9071f8bf55bbbdfda3a2db0..f103abcf8409d7d69abd51baee8bbb29198e943c 100644
@@ -1,8 +1,6 @@
 package org.opendaylight.controller.cluster.raft.behaviors;
 
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import akka.event.LoggingAdapter;
 import com.google.common.base.Optional;
 import com.google.protobuf.ByteString;
 import java.io.ByteArrayOutputStream;
@@ -13,9 +11,13 @@ import java.util.Map;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SnapshotTrackerTest {
 
+    Logger logger = LoggerFactory.getLogger(getClass());
+
     Map<String, String> data;
     ByteString byteString;
     ByteString chunk1;
@@ -37,14 +39,14 @@ public class SnapshotTrackerTest {
 
     @Test
     public void testAddChunk() throws SnapshotTracker.InvalidChunkException {
-        SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+        SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
 
         tracker1.addChunk(1, chunk1, Optional.<Integer>absent());
         tracker1.addChunk(2, chunk2, Optional.<Integer>absent());
         tracker1.addChunk(3, chunk3, Optional.<Integer>absent());
 
         // Verify that an InvalidChunkException is thrown when we try to add a chunk to a sealed tracker
-        SnapshotTracker tracker2 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+        SnapshotTracker tracker2 = new SnapshotTracker(logger, 2);
 
         tracker2.addChunk(1, chunk1, Optional.<Integer>absent());
         tracker2.addChunk(2, chunk2, Optional.<Integer>absent());
@@ -57,7 +59,7 @@ public class SnapshotTrackerTest {
         }
 
         // The first chunk's index must at least be FIRST_CHUNK_INDEX
-        SnapshotTracker tracker3 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+        SnapshotTracker tracker3 = new SnapshotTracker(logger, 2);
 
         try {
             tracker3.addChunk(AbstractLeader.FIRST_CHUNK_INDEX - 1, chunk1, Optional.<Integer>absent());
@@ -67,7 +69,7 @@ public class SnapshotTrackerTest {
         }
 
         // Out of sequence chunk indexes won't work
-        SnapshotTracker tracker4 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+        SnapshotTracker tracker4 = new SnapshotTracker(logger, 2);
 
         tracker4.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.<Integer>absent());
 
@@ -80,7 +82,7 @@ public class SnapshotTrackerTest {
 
         // No exceptions will be thrown when invalid chunk is added with the right sequence
         // If the lastChunkHashCode is missing
-        SnapshotTracker tracker5 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+        SnapshotTracker tracker5 = new SnapshotTracker(logger, 2);
 
         tracker5.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.<Integer>absent());
         // Look I can add the same chunk again
@@ -88,7 +90,7 @@ public class SnapshotTrackerTest {
 
         // An exception will be thrown when an invalid chunk is addedd with the right sequence
         // when the lastChunkHashCode is present
-        SnapshotTracker tracker6 = new SnapshotTracker(mock(LoggingAdapter.class), 2);
+        SnapshotTracker tracker6 = new SnapshotTracker(logger, 2);
 
         tracker6.addChunk(AbstractLeader.FIRST_CHUNK_INDEX, chunk1, Optional.of(-1));
 
@@ -106,7 +108,7 @@ public class SnapshotTrackerTest {
     public void testGetSnapShot() throws SnapshotTracker.InvalidChunkException {
 
         // Trying to get a snapshot before all chunks have been received will throw an exception
-        SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+        SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
 
         tracker1.addChunk(1, chunk1, Optional.<Integer>absent());
         try {
@@ -116,7 +118,7 @@ public class SnapshotTrackerTest {
 
         }
 
-        SnapshotTracker tracker2 = new SnapshotTracker(mock(LoggingAdapter.class), 3);
+        SnapshotTracker tracker2 = new SnapshotTracker(logger, 3);
 
         tracker2.addChunk(1, chunk1, Optional.<Integer>absent());
         tracker2.addChunk(2, chunk2, Optional.<Integer>absent());
@@ -129,7 +131,7 @@ public class SnapshotTrackerTest {
 
     @Test
     public void testGetCollectedChunks() throws SnapshotTracker.InvalidChunkException {
-        SnapshotTracker tracker1 = new SnapshotTracker(mock(LoggingAdapter.class), 5);
+        SnapshotTracker tracker1 = new SnapshotTracker(logger, 5);
 
         ByteString chunks = chunk1.concat(chunk2);
 
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java
index 21a0cb6a889a78cf31910198eadc79f6c53f10e3..a604b05c013ebada7fcc2704fc444bf486ffa1e4 100644
@@ -9,12 +9,11 @@
 package org.opendaylight.controller.cluster.common.actor;
 
 import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public abstract class AbstractUntypedActor extends UntypedActor {
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
+    protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
     public AbstractUntypedActor() {
         if(LOG.isDebugEnabled()) {
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
index 8a6217deab24029bf1f5fbe2f8e7d2b4154bdcff..95ee21674a579ef262231eef71158a6371c4fc14 100644
@@ -8,17 +8,16 @@
 
 package org.opendaylight.controller.cluster.common.actor;
 
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import akka.japi.Procedure;
 import akka.persistence.SnapshotSelectionCriteria;
 import akka.persistence.UntypedPersistentActor;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor {
 
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
+    protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
     public AbstractUntypedPersistentActor() {
         if(LOG.isDebugEnabled()) {
@@ -119,7 +118,7 @@ public abstract class AbstractUntypedPersistentActor extends UntypedPersistentAc
             try {
                 procedure.apply(o);
             } catch (Exception e) {
-                LOG.error(e, "An unexpected error occurred");
+                LOG.error("An unexpected error occurred", e);
             }
         }
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
index 744e2c22c69a9de8f16ff048648fbd087066eb41..87a0fb931edb171ca8e2bb7d4563ff1172de4912 100644
@@ -12,8 +12,6 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
 import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import akka.japi.Creator;
 import akka.persistence.RecoveryFailure;
 import akka.serialization.Serialization;
@@ -101,8 +99,6 @@ public class Shard extends RaftActor {
     // The state of this Shard
     private final InMemoryDOMDataStore store;
 
-    private final LoggingAdapter LOG = Logging.getLogger(getContext().system(), this);
-
     /// The name of this shard
     private final ShardIdentifier name;
 
@@ -220,8 +216,8 @@ public class Shard extends RaftActor {
         }
 
         if (message instanceof RecoveryFailure){
-            LOG.error(((RecoveryFailure) message).cause(), "{}: Recovery failed because of this cause",
-                    persistenceId());
+            LOG.error("{}: Recovery failed because of this cause",
+                    persistenceId(), ((RecoveryFailure) message).cause());
 
             // Even though recovery failed, we still need to finish our recovery, eg send the
             // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
@@ -274,7 +270,7 @@ public class Shard extends RaftActor {
         if(cohortEntry != null) {
             long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
             if(elapsed > transactionCommitTimeout) {
-                LOG.warning("{}: Current transaction {} has timed out after {} ms - aborting",
+                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
                         persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);
 
                 doAbortTransaction(cohortEntry.getTransactionID(), null);
@@ -322,8 +318,8 @@ public class Shard extends RaftActor {
                         new ModificationPayload(cohortEntry.getModification()));
             }
         } catch (Exception e) {
-            LOG.error(e, "{} An exception occurred while preCommitting transaction {}",
-                    persistenceId(), cohortEntry.getTransactionID());
+            LOG.error("{} An exception occurred while preCommitting transaction {}",
+                    persistenceId(), cohortEntry.getTransactionID(), e);
             shardMBean.incrementFailedTransactionsCount();
             getSender().tell(new akka.actor.Status.Failure(e), getSelf());
         }
@@ -376,7 +372,8 @@ public class Shard extends RaftActor {
         } catch (Exception e) {
             sender.tell(new akka.actor.Status.Failure(e), getSelf());
 
-            LOG.error(e, "{}, An exception occurred while committing transaction {}", persistenceId(), transactionID);
+            LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
+                    transactionID, e);
             shardMBean.incrementFailedTransactionsCount();
         } finally {
             commitCoordinator.currentTransactionComplete(transactionID, true);
@@ -445,7 +442,7 @@ public class Shard extends RaftActor {
 
                 @Override
                 public void onFailure(final Throwable t) {
-                    LOG.error(t, "{}: An exception happened during abort", persistenceId());
+                    LOG.error("{}: An exception happened during abort", persistenceId(), t);
 
                     if(sender != null) {
                         sender.tell(new akka.actor.Status.Failure(t), self);
@@ -580,7 +577,7 @@ public class Shard extends RaftActor {
             shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
         } catch (InterruptedException | ExecutionException e) {
             shardMBean.incrementFailedTransactionsCount();
-            LOG.error(e, "{}: Failed to commit", persistenceId());
+            LOG.error("{}: Failed to commit", persistenceId(), e);
         }
     }
 
@@ -667,7 +664,7 @@ public class Shard extends RaftActor {
             try {
                 currentLogRecoveryBatch.add(((ModificationPayload) data).getModification());
             } catch (ClassNotFoundException | IOException e) {
-                LOG.error(e, "{}: Error extracting ModificationPayload", persistenceId());
+                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
             }
         } else if (data instanceof CompositeModificationPayload) {
             currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
@@ -722,7 +719,7 @@ public class Shard extends RaftActor {
                     shardMBean.incrementCommittedTransactionCount();
                 } catch (InterruptedException | ExecutionException e) {
                     shardMBean.incrementFailedTransactionsCount();
-                    LOG.error(e, "{}: Failed to commit", persistenceId());
+                    LOG.error("{}: Failed to commit", persistenceId(), e);
                 }
             }
         }
@@ -752,7 +749,7 @@ public class Shard extends RaftActor {
             try {
                 applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
             } catch (ClassNotFoundException | IOException e) {
-                LOG.error(e, "{}: Error extracting ModificationPayload", persistenceId());
+                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
             }
         }
         else if (data instanceof CompositeModificationPayload) {
@@ -835,7 +832,7 @@ public class Shard extends RaftActor {
             transaction.write(DATASTORE_ROOT, node);
             syncCommitTransaction(transaction);
         } catch (InterruptedException | ExecutionException e) {
-            LOG.error(e, "{}: An exception occurred when applying snapshot", persistenceId());
+            LOG.error("{}: An exception occurred when applying snapshot", persistenceId(), e);
         } finally {
             LOG.info("{}: Done applying snapshot", persistenceId());
         }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
index 165e272d8b09ca1631a94e7a1e7d14a4370bb595..8b95404c4e6bdd06e2f4bb9d234eeab4465dd81f 100644
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
 import akka.actor.Status;
-import akka.event.LoggingAdapter;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import java.util.LinkedList;
@@ -20,6 +19,7 @@ import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransacti
 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.slf4j.Logger;
 
 /**
  * Coordinates commits for a shard ensuring only one concurrent 3-phase commit.
@@ -36,11 +36,11 @@ public class ShardCommitCoordinator {
 
     private final int queueCapacity;
 
-    private final LoggingAdapter log;
+    private final Logger log;
 
     private final String name;
 
-    public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity, LoggingAdapter log,
+    public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity, Logger log,
             String name) {
         cohortCache = CacheBuilder.newBuilder().expireAfterAccess(
                 cacheExpiryTimeoutInSec, TimeUnit.SECONDS).build();
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java
index 22e2dbd47d4d7148c8e41cfe05ce8532df527466..9c8f0b24440775a68f6047e3be19210583e66dbb 100644
@@ -15,8 +15,6 @@ import akka.actor.OneForOneStrategy;
 import akka.actor.Props;
 import akka.actor.SupervisorStrategy;
 import akka.cluster.ClusterEvent;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import akka.japi.Creator;
 import akka.japi.Function;
 import akka.japi.Procedure;
@@ -54,6 +52,8 @@ import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
 import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.Duration;
 
 /**
@@ -67,8 +67,7 @@ import scala.concurrent.duration.Duration;
  */
 public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
+    private final Logger LOG = LoggerFactory.getLogger(getClass());
 
     // Stores a mapping between a member name and the address of the member
     // Member names look like "member-1", "member-2" etc and are as specified
@@ -186,7 +185,7 @@ public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 knownModules = ImmutableSet.copyOf(msg.getModules());
             } else if (message instanceof RecoveryFailure) {
                 RecoveryFailure failure = (RecoveryFailure) message;
-                LOG.error(failure.cause(), "Recovery failed");
+                LOG.error("Recovery failed", failure.cause());
             } else if (message instanceof RecoveryCompleted) {
                 LOG.info("Recovery complete : {}", persistenceId());
 
@@ -424,12 +423,7 @@ public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             new Function<Throwable, SupervisorStrategy.Directive>() {
                 @Override
                 public SupervisorStrategy.Directive apply(Throwable t) {
-                    StringBuilder sb = new StringBuilder();
-                    for(StackTraceElement element : t.getStackTrace()) {
-                       sb.append("\n\tat ")
-                         .append(element.toString());
-                    }
-                    LOG.warning("Supervisor Strategy of resume applied {}",sb.toString());
+                    LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
                     return SupervisorStrategy.resume();
                 }
             }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
index 2a97036883b272d3f6757d9657417c0891567f7f..50528575e77123ce433bf0b4e6ab73f0077c39c5 100644
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import akka.event.LoggingAdapter;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.util.Collection;
@@ -22,6 +21,7 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
 
 /**
  * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
@@ -40,10 +40,10 @@ class ShardRecoveryCoordinator {
     private final SchemaContext schemaContext;
     private final String shardName;
     private final ExecutorService executor;
-    private final LoggingAdapter log;
+    private final Logger log;
     private final String name;
 
-    ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext, LoggingAdapter log,
+    ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext, Logger log,
             String name) {
         this.schemaContext = schemaContext;
         this.shardName = shardName;
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java
index 0c3d33a78c4801002571e2b5dc0ab02f8ddae971..6dd0ab123006a7180722ed456e3202319cf19a15 100644
@@ -10,16 +10,15 @@ package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.Terminated;
 import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import org.opendaylight.controller.cluster.datastore.messages.Monitor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TerminationMonitor extends UntypedActor{
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
+    private static final Logger LOG = LoggerFactory.getLogger(TerminationMonitor.class);
 
     public TerminationMonitor(){
-        LOG.info("Created TerminationMonitor");
+        LOG.debug("Created TerminationMonitor");
     }
 
     @Override public void onReceive(Object message) throws Exception {
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/BackwardsCompatibleThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/BackwardsCompatibleThreePhaseCommitCohort.java
index 30ab97ceb16c76b22286cfb66340b3ba600528d0..f05ef91fc503c8cb912a8b3a2e4b7c5c212667c4 100644
@@ -7,17 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore.compat;
 
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.japi.Creator;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
-import akka.japi.Creator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An actor to maintain backwards compatibility for the base Helium version where the 3-phase commit
@@ -28,7 +28,7 @@ import akka.japi.Creator;
  */
 public class BackwardsCompatibleThreePhaseCommitCohort extends AbstractUntypedActor {
 
-    private final LoggingAdapter LOG = Logging.getLogger(getContext().system(), this);
+    private static final Logger LOG = LoggerFactory.getLogger(BackwardsCompatibleThreePhaseCommitCohort.class);
 
     private final String transactionId;
 
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java
index 48ccd824d41cf6b1456007012d6801d028443fce..13399f6f9deddcc21a2607a111334c6a679f39cb 100644
@@ -10,16 +10,15 @@ package org.opendaylight.controller.remote.rpc;
 
 import akka.actor.Terminated;
 import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import org.opendaylight.controller.cluster.common.actor.Monitor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TerminationMonitor extends UntypedActor{
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
+    private static final Logger LOG = LoggerFactory.getLogger(TerminationMonitor.class);
 
     public TerminationMonitor(){
-        LOG.info("Created TerminationMonitor");
+        LOG.debug("Created TerminationMonitor");
     }
 
     @Override public void onReceive(Object message) throws Exception {
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistry.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistry.java
index 845c1c819a70ca6bac0fb1a717b31f7861b6a6b6..219646d8478ade824d22589842c4d4ddf1edccaa 100644
@@ -8,8 +8,6 @@
 package org.opendaylight.controller.remote.rpc.registry;
 
 import akka.actor.ActorRef;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import akka.japi.Option;
 import akka.japi.Pair;
 import com.google.common.base.Preconditions;
@@ -32,8 +30,6 @@ import org.opendaylight.controller.sal.connector.api.RpcRouter.RouteIdentifier;
  */
 public class RpcRegistry extends BucketStore<RoutingTable> {
 
-    final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
-
     public RpcRegistry() {
         getLocalBucket().setData(new RoutingTable());
     }
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java
index 934609b7cfcfeb8ea14a8c1ff803c53ba938b1aa..628deb4311cebe1da5ff1b44deb715d933b8b8b2 100644
@@ -13,8 +13,6 @@ import akka.actor.ActorRefProvider;
 import akka.actor.Address;
 import akka.actor.Props;
 import akka.cluster.ClusterActorRefProvider;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import com.google.common.annotations.VisibleForTesting;
 import java.util.HashMap;
 import java.util.Map;
@@ -29,6 +27,8 @@ import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketSto
 import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
 import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
 import org.opendaylight.controller.utils.ConditionalProbe;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A store that syncs its data across nodes in the cluster.
@@ -43,7 +43,7 @@ public class BucketStore<T extends Copier<T>> extends AbstractUntypedActorWithMe
 
     private static final Long NO_VERSION = -1L;
 
-    final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+    protected final Logger log = LoggerFactory.getLogger(getClass());
 
     /**
      * Bucket owned by the node
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java
index 1bbcc69f5ed4d5fa6d7d8ea773823c97c9bb6e05..8af1c83c558a0a6b4842b9cb7accfa0a6e68a79b 100644
@@ -17,14 +17,7 @@ import akka.cluster.ClusterActorRefProvider;
 import akka.cluster.ClusterEvent;
 import akka.cluster.Member;
 import akka.dispatch.Mapper;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import akka.pattern.Patterns;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -32,15 +25,20 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
-
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
-import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
 
 /**
  * Gossiper that syncs bucket store across nodes in the cluster.
@@ -61,7 +59,7 @@ import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.Go
 
 public class Gossiper extends AbstractUntypedActorWithMetering {
 
-    final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+    private final Logger log = LoggerFactory.getLogger(getClass());
 
     private Cluster cluster;
 
@@ -121,30 +119,29 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
 
     @Override
     public void postStop(){
-        if (cluster != null)
+        if (cluster != null) {
             cluster.unsubscribe(getSelf());
-        if (gossipTask != null)
+        }
+        if (gossipTask != null) {
             gossipTask.cancel();
+        }
     }
 
     @Override
     protected void handleReceive(Object message) throws Exception {
         //Usually sent by self via gossip task defined above. But its not enforced.
         //These ticks can be sent by another actor as well which is esp. useful while testing
-        if (message instanceof GossipTick)
+        if (message instanceof GossipTick) {
             receiveGossipTick();
-
-            //Message from remote gossiper with its bucket versions
-        else if (message instanceof GossipStatus)
+        } else if (message instanceof GossipStatus) {
+            // Message from remote gossiper with its bucket versions
             receiveGossipStatus((GossipStatus) message);
-
-            //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
-            //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
-            //message with its local versions
-        else if (message instanceof GossipEnvelope)
+        } else if (message instanceof GossipEnvelope) {
+            // Message from remote gossiper with buckets. This is usually in response to GossipStatus
+            // message. The contained buckets are newer as determined by the remote gossiper by
+            // comparing the GossipStatus message with its local versions.
             receiveGossip((GossipEnvelope) message);
-
-        else if (message instanceof ClusterEvent.MemberUp) {
+        } else if (message instanceof ClusterEvent.MemberUp) {
             receiveMemberUp(((ClusterEvent.MemberUp) message).member());
 
         } else if (message instanceof ClusterEvent.MemberRemoved) {
@@ -153,8 +150,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         } else if ( message instanceof ClusterEvent.UnreachableMember){
             receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
 
-        } else
+        } else {
             unhandled(message);
+        }
     }
 
     /**
@@ -181,11 +179,13 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
      */
     void receiveMemberUp(Member member) {
 
-        if (selfAddress.equals(member.address()))
+        if (selfAddress.equals(member.address())) {
             return; //ignore up notification for self
+        }
 
-        if (!clusterMembers.contains(member.address()))
+        if (!clusterMembers.contains(member.address())) {
             clusterMembers.add(member.address());
+        }
         if(log.isDebugEnabled()) {
             log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
         }
@@ -198,13 +198,15 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
      * 3. If there are more than one member, randomly pick one and send gossip status (bucket versions) to it.
      */
     void receiveGossipTick(){
-        if (clusterMembers.size() == 0) return; //no members to send gossip status to
+        if (clusterMembers.size() == 0) {
+            return; //no members to send gossip status to
+        }
 
         Address remoteMemberToGossipTo;
 
-        if (clusterMembers.size() == 1)
+        if (clusterMembers.size() == 1) {
             remoteMemberToGossipTo = clusterMembers.get(0);
-        else {
+        } else {
+        else {
             Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
             remoteMemberToGossipTo = clusterMembers.get(randomIndex);
         }
@@ -229,8 +231,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
      */
     void receiveGossipStatus(GossipStatus status){
         //Don't accept messages from non-members
-        if (!clusterMembers.contains(status.from()))
+        if (!clusterMembers.contains(status.from())) {
             return;
+        }
 
         final ActorRef sender = getSender();
         Future<Object> futureReply =
@@ -385,19 +388,23 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
 
                     for (Address address : remoteVersions.keySet()){
 
-                        if (localVersions.get(address) == null || remoteVersions.get(address) == null)
+                        if (localVersions.get(address) == null || remoteVersions.get(address) == null) {
                             continue; //this condition is taken care of by above diffs
-                        if (localVersions.get(address) <  remoteVersions.get(address))
+                        }
+                        if (localVersions.get(address) <  remoteVersions.get(address)) {
                             localIsOlder.add(address);
-                        else if (localVersions.get(address) > remoteVersions.get(address))
+                        } else if (localVersions.get(address) > remoteVersions.get(address)) {
                             localIsNewer.add(address);
+                        }
                     }
 
-                    if (!localIsOlder.isEmpty())
+                    if (!localIsOlder.isEmpty()) {
                         sendGossipStatusTo(sender, localVersions );
+                    }
 
-                    if (!localIsNewer.isEmpty())
+                    if (!localIsNewer.isEmpty()) {
                         sendGossipTo(sender, localIsNewer);//send newer buckets to remote
+                    }
 
                 }
                 return null;