import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
import org.opendaylight.controller.cluster.messaging.MessageAssembler;
import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
+import org.opendaylight.yangtools.concepts.Identifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;
}
private static final Logger LOG = LoggerFactory.getLogger(ClientActorBehavior.class);
- private static final FiniteDuration RESOLVE_RETRY_DURATION = FiniteDuration.apply(5, TimeUnit.SECONDS);
+ private static final FiniteDuration RESOLVE_RETRY_DURATION = FiniteDuration.apply(1, TimeUnit.SECONDS);
/**
* Map of connections to the backend. This map is concurrent to allow lookups, but given complex operations
return this;
}
+ if (context().messageSlicer().handleMessage(command)) {
+ return this;
+ }
+
return onCommand(command);
}
- private static long extractCookie(final WritableIdentifier id) {
+ private static long extractCookie(final Identifier id) {
if (id instanceof TransactionIdentifier) {
return ((TransactionIdentifier) id).getHistoryId().getCookie();
} else if (id instanceof LocalHistoryIdentifier) {
} finally {
connectionsLock.unlockWrite(stamp);
}
+
+ context().messageSlicer().close();
}
/**
@GuardedBy("connectionsLock")
@Nonnull protected abstract ConnectionConnectCohort connectionUp(@Nonnull ConnectedClientConnection<T> newConn);
- private void backendConnectFinished(final Long shard, final AbstractClientConnection<T> conn,
+ private void backendConnectFinished(final Long shard, final AbstractClientConnection<T> oldConn,
final T backend, final Throwable failure) {
if (failure != null) {
if (failure instanceof TimeoutException) {
- if (!conn.equals(connections.get(shard))) {
+ if (!oldConn.equals(connections.get(shard))) {
// AbstractClientConnection will remove itself when it decides there is no point in continuing,
// at which point we want to stop retrying
- LOG.info("{}: stopping resolution of shard {} on stale connection {}", persistenceId(), shard, conn,
- failure);
+ LOG.info("{}: stopping resolution of shard {} on stale connection {}", persistenceId(), shard,
+ oldConn, failure);
return;
}
LOG.debug("{}: timed out resolving shard {}, scheduling retry in {}", persistenceId(), shard,
RESOLVE_RETRY_DURATION, failure);
context().executeInActor(b -> {
- resolveConnection(shard, conn);
+ resolveConnection(shard, oldConn);
return b;
}, RESOLVE_RETRY_DURATION);
return;
cause = new RuntimeRequestException("Failed to resolve shard " + shard, failure);
}
- conn.poison(cause);
+ oldConn.poison(cause);
return;
}
final Stopwatch sw = Stopwatch.createStarted();
// Create a new connected connection
- final ConnectedClientConnection<T> newConn = new ConnectedClientConnection<>(conn.context(),
- conn.cookie(), backend);
- LOG.info("{}: resolving connection {} to {}", persistenceId(), conn, newConn);
+ final ConnectedClientConnection<T> newConn = new ConnectedClientConnection<>(oldConn, backend);
+ LOG.info("{}: resolving connection {} to {}", persistenceId(), oldConn, newConn);
// Start reconnecting without the old connection lock held
final ConnectionConnectCohort cohort = Verify.verifyNotNull(connectionUp(newConn));
// Lock the old connection and get a reference to its entries
- final Collection<ConnectionEntry> replayIterable = conn.startReplay();
+ final Collection<ConnectionEntry> replayIterable = oldConn.startReplay();
// Finish the connection attempt
final ReconnectForwarder forwarder = Verify.verifyNotNull(cohort.finishReconnect(replayIterable));
+ // Cancel sleep debt after entries were replayed, before new connection starts receiving.
+ newConn.cancelDebt();
+
// Install the forwarder, unlocking the old connection
- conn.finishReplay(forwarder);
+ oldConn.finishReplay(forwarder);
// Make sure new lookups pick up the new connection
- if (!connections.replace(shard, conn, newConn)) {
- final AbstractClientConnection<T> existing = connections.get(conn.cookie());
+ if (!connections.replace(shard, oldConn, newConn)) {
+ final AbstractClientConnection<T> existing = connections.get(oldConn.cookie());
LOG.warn("{}: old connection {} does not match existing {}, new connection {} in limbo",
- persistenceId(), conn, existing, newConn);
+ persistenceId(), oldConn, existing, newConn);
} else {
- LOG.info("{}: replaced connection {} with {} in {}", persistenceId(), conn, newConn, sw);
+ LOG.info("{}: replaced connection {} with {} in {}", persistenceId(), oldConn, newConn, sw);
}
} finally {
connectionsLock.unlockWrite(stamp);
}
} else {
LOG.info("{}: removed connection {}", persistenceId(), conn);
+ cancelSlicing(conn.cookie());
}
} finally {
connectionsLock.unlockWrite(stamp);
} else {
LOG.warn("{}: failed to replace connection {}, as it was not tracked", persistenceId(), conn);
}
+ } else {
+ cancelSlicing(oldConn.cookie());
}
} finally {
connectionsLock.unlockWrite(stamp);
}));
}
+ /**
+  * Cancels any in-progress message slicing whose message identifier maps to the given connection cookie.
+  * Used when a connection is removed or replaced so stale sliced messages are not delivered.
+  *
+  * @param cookie the connection cookie whose outstanding sliced messages should be cancelled
+  */
+ private void cancelSlicing(final Long cookie) {
+     context().messageSlicer().cancelSlicing(id -> {
+         try {
+             return cookie.equals(extractCookie(id));
+         } catch (IllegalArgumentException e) {
+             // Keep a single "{}" placeholder so SLF4J treats the trailing exception as a
+             // Throwable and logs its stack trace; a second "{}" would consume it as toString().
+             LOG.debug("extractCookie failed while cancelling slicing for cookie {}", cookie, e);
+             return false;
+         }
+     });
+ }
+
private ConnectingClientConnection<T> createConnection(final Long shard) {
final ConnectingClientConnection<T> conn = new ConnectingClientConnection<>(context(), shard);
resolveConnection(shard, conn);