2 * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
8 package org.opendaylight.controller.cluster.access.client;
10 import com.google.common.annotations.Beta;
11 import com.google.common.base.Preconditions;
12 import com.google.common.base.Stopwatch;
13 import com.google.common.base.Verify;
14 import java.util.Collection;
16 import java.util.Optional;
17 import java.util.concurrent.ConcurrentHashMap;
18 import java.util.concurrent.TimeUnit;
19 import java.util.concurrent.TimeoutException;
20 import javax.annotation.Nonnull;
21 import javax.annotation.Nullable;
22 import javax.annotation.concurrent.GuardedBy;
23 import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
24 import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelopeException;
25 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
26 import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
27 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
28 import org.opendaylight.controller.cluster.access.concepts.RequestException;
29 import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
30 import org.opendaylight.controller.cluster.access.concepts.ResponseEnvelope;
31 import org.opendaylight.controller.cluster.access.concepts.RetiredGenerationException;
32 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
33 import org.opendaylight.controller.cluster.access.concepts.SuccessEnvelope;
34 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
35 import org.opendaylight.yangtools.concepts.Identifiable;
36 import org.opendaylight.yangtools.concepts.WritableIdentifier;
37 import org.slf4j.Logger;
38 import org.slf4j.LoggerFactory;
39 import scala.concurrent.duration.FiniteDuration;
42 * A behavior which handles messages sent to an {@link AbstractClientActor}.
44 * @author Robert Varga
47 public abstract class ClientActorBehavior<T extends BackendInfo> extends
48 RecoveredClientActorBehavior<ClientActorContext> implements Identifiable<ClientIdentifier> {
50 * Connection reconnect cohort, driven by this class.
53 protected interface ConnectionConnectCohort {
// Implemented by subclasses via connectionUp(); finishReconnect() is invoked from
// backendConnectFinished() while the connections write-lock is held.
55 * Finish the connection by replaying previous messages onto the new connection.
57 * @param enqueuedEntries Previously-enqueued entries
58 * @return A {@link ReconnectForwarder} to handle any straggler messages which arrive after this method returns.
60 @Nonnull ReconnectForwarder finishReconnect(@Nonnull Collection<ConnectionEntry> enqueuedEntries);
63 private static final Logger LOG = LoggerFactory.getLogger(ClientActorBehavior.class);
64 private static final FiniteDuration RESOLVE_RETRY_DURATION = FiniteDuration.apply(5, TimeUnit.SECONDS);
67 * Map of connections to the backend. This map is concurrent to allow lookups, but given complex operations
68 * involved in connection transitions it is protected by a {@link InversibleLock}. Write-side of the lock is taken
69 * during connection transitions. Optimistic read-side of the lock is taken when new connections are introduced
73 * The lock detects potential AB/BA deadlock scenarios and will force the reader side out by throwing
74 * a {@link InversibleLockException} -- which must be propagated up, releasing locks as it propagates. The initial
75 * entry point causing the conflicting lookup must then call {@link InversibleLockException#awaitResolution()}
76 * before retrying the operation.
78 // TODO: it should be possible to move these two into ClientActorContext
79 private final Map<Long, AbstractClientConnection<T>> connections = new ConcurrentHashMap<>();
80 private final InversibleLock connectionsLock = new InversibleLock();
81 private final BackendInfoResolver<T> resolver;
// Creates the behavior. The resolver is used to locate backend information for shards.
// The resolver is null-checked here; context is presumably validated by the superclass
// constructor, which is elided from this view -- confirm.
83 protected ClientActorBehavior(@Nonnull final ClientActorContext context,
84 @Nonnull final BackendInfoResolver<T> resolver) {
86 this.resolver = Preconditions.checkNotNull(resolver);
// Identifier of this client, delegated to the actor context.
91 public final ClientIdentifier getIdentifier() {
92 return context().getIdentifier();
96 * Get a connection to a shard.
98 * @param shard Shard cookie
99 * @return Connection to a shard
100 * @throws InversibleLockException if the shard is being reconnected
102 public final AbstractClientConnection<T> getConnection(final Long shard) {
// Optimistic read: no lock is taken unless a writer (connection transition) races us.
104 final long stamp = connectionsLock.optimisticRead();
// Lazily instantiate the connection; computeIfAbsent keeps the concurrent map consistent
// for other readers.
105 final AbstractClientConnection<T> conn = connections.computeIfAbsent(shard, this::createConnection);
106 if (connectionsLock.validate(stamp)) {
107 // No write-lock in-between, return success
// The retry/failure path when validation fails is elided from this chunk.
// Looks up the connection owning a response, keyed by the cookie embedded in the
// response's target identifier. May return null for untracked connections.
113 private AbstractClientConnection<T> getConnection(final ResponseEnvelope<?> response) {
114 // Always called from actor context: no locking required
115 return connections.get(extractCookie(response.getMessage().getTarget()));
118 @SuppressWarnings("unchecked")
// Dispatches an incoming actor message: internal commands execute directly, success and
// failure envelopes are routed to their handlers, anything else goes to the subclass
// hook onCommand(). Returns the next behavior (null would indicate shutdown).
120 final ClientActorBehavior<T> onReceiveCommand(final Object command) {
121 if (command instanceof InternalCommand) {
122 return ((InternalCommand<T>) command).execute(this);
124 if (command instanceof SuccessEnvelope) {
125 return onRequestSuccess((SuccessEnvelope) command);
127 if (command instanceof FailureEnvelope) {
128 return internalOnRequestFailure((FailureEnvelope) command);
131 return onCommand(command);
// Extracts the shard cookie from a transaction or local-history identifier; any other
// identifier type is a programming error and fails fast.
134 private static long extractCookie(final WritableIdentifier id) {
135 if (id instanceof TransactionIdentifier) {
136 return ((TransactionIdentifier) id).getHistoryId().getCookie();
137 } else if (id instanceof LocalHistoryIdentifier) {
138 return ((LocalHistoryIdentifier) id).getCookie();
140 throw new IllegalArgumentException("Unhandled identifier " + id);
// Routes a response envelope to the connection it targets; responses for unknown
// (e.g. already-removed) connections are logged and dropped rather than failing.
144 private void onResponse(final ResponseEnvelope<?> response) {
145 final AbstractClientConnection<T> connection = getConnection(response);
146 if (connection != null) {
147 connection.receiveResponse(response);
149 LOG.info("{}: Ignoring unknown response {}", persistenceId(), response);
// Handles a SuccessEnvelope; presumably routes it via onResponse() -- body is elided
// from this chunk, confirm against the full file.
153 private ClientActorBehavior<T> onRequestSuccess(final SuccessEnvelope success) {
// Handles a FailureEnvelope after internalOnRequestFailure() has filtered reconnect
// cases; presumably routes it via onResponse() -- body is elided from this chunk.
158 private ClientActorBehavior<T> onRequestFailure(final FailureEnvelope failure) {
// Central failure-envelope triage: drops stale-session responses, escalates retired
// generations, forces a reconnect on leadership/sequencing errors, and otherwise
// delegates to onRequestFailure().
163 private ClientActorBehavior<T> internalOnRequestFailure(final FailureEnvelope command) {
164 final AbstractClientConnection<T> conn = getConnection(command);
167 * We are talking to multiple actors, which may be lagging behind our state significantly. This has
168 * the effect that we may be receiving responses from a previous connection after we have created a new
169 * one to a different actor.
171 * Since we are already replaying requests to the new actor, we want to ignore errors reported on the old
172 * connection -- for example NotLeaderException, which must not cause a new reconnect. Check the envelope's
173 * sessionId and if it does not match our current connection just ignore it.
// NOTE(review): conn is dereferenced here while later branches null-check it; an
// enclosing null check likely sits in the elided lines -- confirm.
175 final Optional<T> optBackend = conn.getBackendInfo();
176 if (optBackend.isPresent() && optBackend.get().getSessionId() != command.getSessionId()) {
177 LOG.debug("{}: Mismatched current connection {} and envelope {}, ignoring response", persistenceId(),
183 final RequestFailure<?, ?> failure = command.getMessage();
184 final RequestException cause = failure.getCause();
185 if (cause instanceof RetiredGenerationException) {
// A newer client generation exists; this client must stop (halt/poison follow-up is
// in the elided lines -- confirm).
186 LOG.error("{}: current generation {} has been superseded", persistenceId(), getIdentifier(), cause);
191 if (cause instanceof NotLeaderException) {
192 if (conn instanceof ReconnectingClientConnection) {
193 // Already reconnecting, do not churn the logs
195 } else if (conn != null) {
196 LOG.info("{}: connection {} indicated no leadership, reconnecting it", persistenceId(), conn, cause);
197 return conn.reconnect(this, cause);
200 if (cause instanceof OutOfSequenceEnvelopeException) {
201 if (conn instanceof ReconnectingClientConnection) {
202 // Already reconnecting, do not churn the logs
// NOTE(review): the message below says "indicated no sequencing mismatch", but this
// branch fires precisely on OutOfSequenceEnvelopeException -- the "no" looks like a
// copy-paste from the NotLeaderException branch; confirm intended wording.
204 } else if (conn != null) {
205 LOG.info("{}: connection {} indicated no sequencing mismatch on {} sequence {}, reconnecting it",
206 persistenceId(), conn, failure.getTarget(), failure.getSequence(), cause);
207 return conn.reconnect(this, cause);
211 return onRequestFailure(command);
// Fails all tracked connections with the given cause. Taken under the connections
// write-lock so no new connections can be introduced mid-poison.
214 private void poison(final RequestException cause) {
215 final long stamp = connectionsLock.writeLock();
217 for (AbstractClientConnection<T> q : connections.values()) {
// per-connection poisoning and map cleanup are in the elided lines of this chunk
223 connectionsLock.unlockWrite(stamp);
228 * Halt And Catch Fire. Halt processing on this client. Implementations need to ensure they initiate state flush
229 * procedures. No attempt to use this instance should be made after this method returns. Any such use may result
230 * in undefined behavior.
232 * @param cause Failure cause
// NOTE(review): presumably driven from the failure path (e.g. the
// RetiredGenerationException handling) -- the invoking code is elided here, confirm.
234 protected abstract void haltClient(@Nonnull Throwable cause);
237 * Override this method to handle any command which is not handled by the base behavior.
239 * @param command the command to process
240 * @return Next behavior to use, null if this actor should shut down.
// Invoked from onReceiveCommand() for any message that is not an InternalCommand or a
// success/failure envelope.
243 protected abstract ClientActorBehavior<T> onCommand(@Nonnull Object command);
246 * Override this method to provide a backend resolver instance.
248 * @return a backend resolver instance
// Accessor for the resolver captured in the constructor (return statement is in the
// elided lines of this chunk).
250 protected final @Nonnull BackendInfoResolver<T> resolver() {
255 * Callback invoked when a new connection has been established. Implementations are expected to perform preparatory
256 * tasks before the previous connection is frozen.
258 * @param newConn New connection
259 * @return ConnectionConnectCohort which will be used to complete the process of bringing the connection up.
// Called from backendConnectFinished() while holding the connections write-lock.
261 @GuardedBy("connectionsLock")
262 @Nonnull protected abstract ConnectionConnectCohort connectionUp(@Nonnull ConnectedClientConnection<T> newConn);
// Completion callback for backend resolution. On TimeoutException it schedules a retry
// (unless the connection has gone stale); on other failures it builds a RequestException
// cause (the follow-up -- likely poisoning the connection -- is elided from this chunk);
// on success it swaps in a ConnectedClientConnection under the write-lock.
264 private void backendConnectFinished(final Long shard, final AbstractClientConnection<T> conn,
265 final T backend, final Throwable failure) {
266 if (failure != null) {
267 if (failure instanceof TimeoutException) {
268 if (!conn.equals(connections.get(shard))) {
269 // AbstractClientConnection will remove itself when it decides there is no point in continuing,
270 // at which point we want to stop retrying
271 LOG.info("{}: stopping resolution of shard {} on stale connection {}", persistenceId(), shard, conn,
// Re-run resolution from the actor thread after the retry delay.
276 LOG.debug("{}: timed out resolving shard {}, scheduling retry in {}", persistenceId(), shard,
277 RESOLVE_RETRY_DURATION, failure);
278 context().executeInActor(b -> {
279 resolveConnection(shard, conn);
281 }, RESOLVE_RETRY_DURATION);
285 LOG.error("{}: failed to resolve shard {}", persistenceId(), shard, failure);
// Preserve a RequestException as-is, otherwise wrap the original as the cause.
286 final RequestException cause;
287 if (failure instanceof RequestException) {
288 cause = (RequestException) failure;
290 cause = new RuntimeRequestException("Failed to resolve shard " + shard, failure);
297 LOG.info("{}: resolved shard {} to {}", persistenceId(), shard, backend);
// Connection transition: everything below happens under the write-side of the lock.
298 final long stamp = connectionsLock.writeLock();
300 final Stopwatch sw = Stopwatch.createStarted();
302 // Create a new connected connection
303 final ConnectedClientConnection<T> newConn = new ConnectedClientConnection<>(conn.context(),
304 conn.cookie(), backend);
305 LOG.info("{}: resolving connection {} to {}", persistenceId(), conn, newConn);
307 // Start reconnecting without the old connection lock held
308 final ConnectionConnectCohort cohort = Verify.verifyNotNull(connectionUp(newConn));
310 // Lock the old connection and get a reference to its entries
311 final Collection<ConnectionEntry> replayIterable = conn.startReplay();
313 // Finish the connection attempt
314 final ReconnectForwarder forwarder = Verify.verifyNotNull(cohort.finishReconnect(replayIterable));
316 // Install the forwarder, unlocking the old connection
317 conn.finishReplay(forwarder);
319 // Make sure new lookups pick up the new connection
// replace() is conditional on the old connection still being mapped; if it is not,
// the new connection is left unmapped ("in limbo") and only logged.
320 if (!connections.replace(shard, conn, newConn)) {
321 final AbstractClientConnection<T> existing = connections.get(conn.cookie());
322 LOG.warn("{}: old connection {} does not match existing {}, new connection {} in limbo",
323 persistenceId(), conn, existing, newConn);
325 LOG.info("{}: replaced connection {} with {} in {}", persistenceId(), conn, newConn, sw);
328 connectionsLock.unlockWrite(stamp);
// Removes a connection from the tracking map under the write-lock. remove(key, value)
// is conditional: a superseded or untracked connection is only logged, never clobbered.
332 void removeConnection(final AbstractClientConnection<?> conn) {
333 final long stamp = connectionsLock.writeLock();
335 if (!connections.remove(conn.cookie(), conn)) {
336 final AbstractClientConnection<T> existing = connections.get(conn.cookie());
337 if (existing != null) {
338 LOG.warn("{}: failed to remove connection {}, as it was superseded by {}", persistenceId(), conn,
341 LOG.warn("{}: failed to remove connection {}, as it was not tracked", persistenceId(), conn);
344 LOG.info("{}: removed connection {}", persistenceId(), conn);
347 connectionsLock.unlockWrite(stamp);
351 @SuppressWarnings("unchecked")
// Swaps a connected connection for its reconnecting replacement in the map (under the
// write-lock), then kicks off a backend refresh whose completion is marshalled back to
// the actor thread via executeInActor().
352 void reconnectConnection(final ConnectedClientConnection<?> oldConn,
353 final ReconnectingClientConnection<?> newConn) {
354 final ReconnectingClientConnection<T> conn = (ReconnectingClientConnection<T>)newConn;
355 LOG.info("{}: connection {} reconnecting as {}", persistenceId(), oldConn, newConn);
357 final long stamp = connectionsLock.writeLock();
// NOTE(review): the branch consuming `replaced` is elided in this chunk; the warnings
// below presumably sit in its failure arm -- confirm.
359 final boolean replaced = connections.replace(oldConn.cookie(), (AbstractClientConnection<T>)oldConn, conn);
361 final AbstractClientConnection<T> existing = connections.get(oldConn.cookie());
362 if (existing != null) {
363 LOG.warn("{}: failed to replace connection {}, as it was superseded by {}", persistenceId(), conn,
366 LOG.warn("{}: failed to replace connection {}, as it was not tracked", persistenceId(), conn);
370 connectionsLock.unlockWrite(stamp);
373 final Long shard = oldConn.cookie();
374 LOG.info("{}: refreshing backend for shard {}", persistenceId(), shard);
// NOTE(review): getBackendInfo().get() is an unchecked Optional access; it assumes a
// reconnecting connection always retains its backend info -- confirm that invariant.
375 resolver().refreshBackendInfo(shard, conn.getBackendInfo().get()).whenComplete(
376 (backend, failure) -> context().executeInActor(behavior -> {
377 backendConnectFinished(shard, conn, backend, failure);
// Creates the initial connecting-state connection for a shard and starts asynchronous
// backend resolution for it (the return statement is in the elided lines of this chunk).
382 private ConnectingClientConnection<T> createConnection(final Long shard) {
383 final ConnectingClientConnection<T> conn = new ConnectingClientConnection<>(context(), shard);
384 resolveConnection(shard, conn);
388 private void resolveConnection(final Long shard, final AbstractClientConnection<T> conn) {
389 LOG.debug("{}: resolving shard {} connection {}", persistenceId(), shard, conn);
390 resolver().getBackendInfo(shard).whenComplete((backend, failure) -> context().executeInActor(behavior -> {
391 backendConnectFinished(shard, conn, backend, failure);