/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
9 package org.opendaylight.controller.remote.rpc.registry.gossip;
11 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess.Singletons.GET_ALL_BUCKETS;
12 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess.Singletons.GET_BUCKET_VERSIONS;
14 import akka.actor.ActorRef;
15 import akka.actor.ActorRefProvider;
16 import akka.actor.Address;
17 import akka.actor.PoisonPill;
18 import akka.actor.Terminated;
19 import akka.cluster.ClusterActorRefProvider;
20 import akka.persistence.DeleteSnapshotsFailure;
21 import akka.persistence.DeleteSnapshotsSuccess;
22 import akka.persistence.RecoveryCompleted;
23 import akka.persistence.SaveSnapshotFailure;
24 import akka.persistence.SaveSnapshotSuccess;
25 import akka.persistence.SnapshotOffer;
26 import akka.persistence.SnapshotSelectionCriteria;
27 import com.google.common.annotations.VisibleForTesting;
28 import com.google.common.base.Preconditions;
29 import com.google.common.base.Verify;
30 import com.google.common.collect.HashMultimap;
31 import com.google.common.collect.ImmutableMap;
32 import com.google.common.collect.SetMultimap;
33 import java.util.Collection;
34 import java.util.HashMap;
36 import java.util.Map.Entry;
37 import java.util.Optional;
38 import java.util.function.Consumer;
39 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
40 import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
/**
 * A store that syncs its data across nodes in the cluster.
 * It maintains a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Bucket} per node. Buckets are versioned.
 * A node can write ONLY to its bucket. This way, write conflicts are avoided.
 *
 * <p>Buckets are sync'ed across nodes using Gossip protocol (http://en.wikipedia.org/wiki/Gossip_protocol).
 * This store uses a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}.
 */
51 public abstract class BucketStoreActor<T extends BucketData<T>> extends
52 AbstractUntypedPersistentActorWithMetering {
53 // Internal marker interface for messages which are just bridges to execute a method
55 private interface ExecuteInActor extends Consumer<BucketStoreActor<?>> {
60 * Buckets owned by other known nodes in the cluster.
62 private final Map<Address, Bucket<T>> remoteBuckets = new HashMap<>();
65 * Bucket version for every known node in the cluster including this node.
67 private final Map<Address, Long> versions = new HashMap<>();
70 * {@link ActorRef}s being watched for liveness due to being referenced in bucket data. Each actor is monitored
71 * once, possibly being tied to multiple addresses (and by extension, buckets).
73 private final SetMultimap<ActorRef, Address> watchedActors = HashMultimap.create(1, 1);
75 private final RemoteRpcProviderConfig config;
76 private final String persistenceId;
79 * Cluster address for this node.
81 private Address selfAddress;
84 * Bucket owned by the node. Initialized during recovery (due to incarnation number).
86 private LocalBucket<T> localBucket;
87 private T initialData;
88 private Integer incarnation;
89 private boolean persisting;
/**
 * Create a new store actor.
 *
 * @param config provider configuration, must not be null
 * @param persistenceId Akka persistence identifier for this actor, must not be null
 * @param initialData initial contents of the local bucket, must not be null
 */
protected BucketStoreActor(final RemoteRpcProviderConfig config, final String persistenceId, final T initialData) {
    this.config = Preconditions.checkNotNull(config);
    this.initialData = Preconditions.checkNotNull(initialData);
    this.persistenceId = Preconditions.checkNotNull(persistenceId);
}
97 static ExecuteInActor getBucketsByMembersMessage(final Collection<Address> members) {
98 return actor -> actor.getBucketsByMembers(members);
101 static ExecuteInActor removeBucketMessage(final Address addr) {
102 return actor -> actor.removeBucket(addr);
105 static ExecuteInActor updateRemoteBucketsMessage(final Map<Address, Bucket<?>> buckets) {
106 return actor -> actor.updateRemoteBuckets(buckets);
109 public final T getLocalData() {
110 return getLocalBucket().getData();
113 public final Map<Address, Bucket<T>> getRemoteBuckets() {
114 return remoteBuckets;
117 public final Map<Address, Long> getVersions() {
122 public final String persistenceId() {
123 return persistenceId;
127 public void preStart() {
128 ActorRefProvider provider = getContext().provider();
129 selfAddress = provider.getDefaultAddress();
131 if (provider instanceof ClusterActorRefProvider) {
132 getContext().actorOf(Gossiper.props(config).withMailbox(config.getMailBoxName()), "gossiper");
137 protected void handleCommand(final Object message) throws Exception {
138 if (GET_ALL_BUCKETS == message) {
139 // GetAllBuckets is used only in testing
140 getSender().tell(getAllBuckets(), self());
145 handleSnapshotMessage(message);
149 if (message instanceof ExecuteInActor) {
150 ((ExecuteInActor) message).accept(this);
151 } else if (GET_BUCKET_VERSIONS == message) {
152 // FIXME: do we need to send ourselves?
153 getSender().tell(ImmutableMap.copyOf(versions), getSelf());
154 } else if (message instanceof Terminated) {
155 actorTerminated((Terminated) message);
156 } else if (message instanceof DeleteSnapshotsSuccess) {
157 LOG.debug("{}: got command: {}", persistenceId(), message);
158 } else if (message instanceof DeleteSnapshotsFailure) {
159 LOG.warn("{}: failed to delete prior snapshots", persistenceId(),
160 ((DeleteSnapshotsFailure) message).cause());
162 LOG.debug("Unhandled message [{}]", message);
167 private void handleSnapshotMessage(final Object message) {
168 if (message instanceof SaveSnapshotFailure) {
169 LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) message).cause());
171 self().tell(PoisonPill.getInstance(), ActorRef.noSender());
172 } else if (message instanceof SaveSnapshotSuccess) {
173 LOG.debug("{}: got command: {}", persistenceId(), message);
174 SaveSnapshotSuccess saved = (SaveSnapshotSuccess)message;
175 deleteSnapshots(new SnapshotSelectionCriteria(saved.metadata().sequenceNr(),
176 saved.metadata().timestamp() - 1, 0L, 0L));
180 LOG.debug("{}: stashing command {}", persistenceId(), message);
186 protected final void handleRecover(final Object message) throws Exception {
187 if (message instanceof RecoveryCompleted) {
188 if (incarnation != null) {
189 incarnation = incarnation + 1;
194 this.localBucket = new LocalBucket<>(incarnation.intValue(), initialData);
196 LOG.debug("{}: persisting new incarnation {}", persistenceId(), incarnation);
198 saveSnapshot(incarnation);
199 } else if (message instanceof SnapshotOffer) {
200 incarnation = (Integer) ((SnapshotOffer)message).snapshot();
201 LOG.debug("{}: recovered incarnation {}", persistenceId(), incarnation);
203 LOG.warn("{}: ignoring recovery message {}", persistenceId(), message);
207 protected final RemoteRpcProviderConfig getConfig() {
211 protected final void updateLocalBucket(final T data) {
212 final LocalBucket<T> local = getLocalBucket();
213 final boolean bumpIncarnation = local.setData(data);
214 versions.put(selfAddress, local.getVersion());
216 if (bumpIncarnation) {
217 LOG.debug("Version wrapped. incrementing incarnation");
219 Verify.verify(incarnation < Integer.MAX_VALUE, "Ran out of incarnations, cannot continue");
220 incarnation = incarnation + 1;
223 saveSnapshot(incarnation);
228 * Callback to subclasses invoked when a bucket is removed.
230 * @param address Remote address
231 * @param bucket Bucket removed
233 protected abstract void onBucketRemoved(final Address address, final Bucket<T> bucket);
236 * Callback to subclasses invoked when the set of remote buckets is updated.
238 * @param newBuckets Map of address to new bucket. Never null, but can be empty.
240 protected abstract void onBucketsUpdated(final Map<Address, Bucket<T>> newBuckets);
243 * Helper to collect all known buckets.
245 * @return self owned + remote buckets
247 private Map<Address, Bucket<T>> getAllBuckets() {
248 Map<Address, Bucket<T>> all = new HashMap<>(remoteBuckets.size() + 1);
250 //first add the local bucket
251 all.put(selfAddress, getLocalBucket().snapshot());
253 //then get all remote buckets
254 all.putAll(remoteBuckets);
260 * Helper to collect buckets for requested members.
262 * @param members requested members
264 private void getBucketsByMembers(final Collection<Address> members) {
265 Map<Address, Bucket<T>> buckets = new HashMap<>();
267 //first add the local bucket if asked
268 if (members.contains(selfAddress)) {
269 buckets.put(selfAddress, getLocalBucket().snapshot());
272 //then get buckets for requested remote nodes
273 for (Address address : members) {
274 if (remoteBuckets.containsKey(address)) {
275 buckets.put(address, remoteBuckets.get(address));
279 getSender().tell(buckets, getSelf());
282 private void removeBucket(final Address addr) {
283 final Bucket<T> bucket = remoteBuckets.remove(addr);
284 if (bucket != null) {
285 bucket.getWatchActor().ifPresent(ref -> removeWatch(addr, ref));
286 onBucketRemoved(addr, bucket);
291 * Update local copy of remote buckets where local copy's version is older.
293 * @param receivedBuckets buckets sent by remote
294 * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}
297 void updateRemoteBuckets(final Map<Address, Bucket<?>> receivedBuckets) {
298 LOG.debug("{}: receiveUpdateRemoteBuckets: {}", selfAddress, receivedBuckets);
299 if (receivedBuckets == null || receivedBuckets.isEmpty()) {
304 final Map<Address, Bucket<T>> newBuckets = new HashMap<>(receivedBuckets.size());
305 for (Entry<Address, Bucket<?>> entry : receivedBuckets.entrySet()) {
306 final Address addr = entry.getKey();
308 if (selfAddress.equals(addr)) {
309 // Remote cannot update our bucket
313 @SuppressWarnings("unchecked")
314 final Bucket<T> receivedBucket = (Bucket<T>) entry.getValue();
315 if (receivedBucket == null) {
316 LOG.debug("Ignoring null bucket from {}", addr);
320 // update only if remote version is newer
321 final long remoteVersion = receivedBucket.getVersion();
322 final Long localVersion = versions.get(addr);
323 if (localVersion != null && remoteVersion <= localVersion.longValue()) {
324 LOG.debug("Ignoring down-versioned bucket from {} ({} local {} remote)", addr, localVersion,
328 newBuckets.put(addr, receivedBucket);
329 versions.put(addr, remoteVersion);
330 final Bucket<T> prevBucket = remoteBuckets.put(addr, receivedBucket);
332 // Deal with DeathWatch subscriptions
333 final Optional<ActorRef> prevRef = prevBucket != null ? prevBucket.getWatchActor() : Optional.empty();
334 final Optional<ActorRef> curRef = receivedBucket.getWatchActor();
335 if (!curRef.equals(prevRef)) {
336 prevRef.ifPresent(ref -> removeWatch(addr, ref));
337 curRef.ifPresent(ref -> addWatch(addr, ref));
340 LOG.debug("Updating bucket from {} to version {}", entry.getKey(), remoteVersion);
343 LOG.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
345 onBucketsUpdated(newBuckets);
348 private void addWatch(final Address addr, final ActorRef ref) {
349 if (!watchedActors.containsKey(ref)) {
350 getContext().watch(ref);
351 LOG.debug("Watching {}", ref);
353 watchedActors.put(ref, addr);
356 private void removeWatch(final Address addr, final ActorRef ref) {
357 watchedActors.remove(ref, addr);
358 if (!watchedActors.containsKey(ref)) {
359 getContext().unwatch(ref);
360 LOG.debug("No longer watching {}", ref);
364 private void actorTerminated(final Terminated message) {
365 LOG.info("Actor termination {} received", message);
367 for (Address addr : watchedActors.removeAll(message.getActor())) {
368 versions.remove(addr);
369 final Bucket<T> bucket = remoteBuckets.remove(addr);
370 if (bucket != null) {
371 LOG.debug("Source actor dead, removing bucket {} from ", bucket, addr);
372 onBucketRemoved(addr, bucket);
378 protected boolean isPersisting() {
382 private LocalBucket<T> getLocalBucket() {
383 Preconditions.checkState(localBucket != null, "Attempted to access local bucket before recovery completed");