import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
+import java.util.concurrent.locks.StampedLock;
import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
import org.opendaylight.controller.cluster.access.client.ClientActorBehavior;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
+import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
+import org.opendaylight.controller.cluster.access.client.ReconnectForwarder;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
*
* <p>
* This class is not visible outside of this package because it breaks the actor containment. Services provided to
- * Java world outside of actor containment are captured in {@link DistributedDataStoreClient}.
+ * Java world outside of actor containment are captured in {@link DataStoreClient}.
*
* <p>
- * IMPORTANT: this class breaks actor containment via methods implementing {@link DistributedDataStoreClient} contract.
+ * IMPORTANT: this class breaks actor containment via methods implementing {@link DataStoreClient} contract.
* When touching internal state, be mindful of the execution context, Actor or POJO, from which the
* state is being accessed or modified.
*
* @author Robert Varga
*/
abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<ShardBackendInfo>
- implements DistributedDataStoreClient {
+ implements DataStoreClient {
private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStoreClientBehavior.class);
private final Map<LocalHistoryIdentifier, ClientLocalHistory> histories = new ConcurrentHashMap<>();
private final AtomicLong nextHistoryId = new AtomicLong(1);
+ private final StampedLock lock = new StampedLock();
private final SingleClientHistory singleHistory;
private volatile Throwable aborted;
}
private void abortOperations(final Throwable cause) {
- // This acts as a barrier, application threads check this after they have added an entry in the maps,
- // and if they observe aborted being non-null, they will perform their cleanup and not return the handle.
- aborted = cause;
+ // Take the write lock so the abort cannot interleave with connectionUp() (also write lock) or
+ // createLocalHistory() (read lock).
+ final long stamp = lock.writeLock();
+ try {
+ // This acts as a barrier, application threads check this after they have added an entry in the maps,
+ // and if they observe aborted being non-null, they will perform their cleanup and not return the handle.
+ aborted = cause;
- for (ClientLocalHistory h : histories.values()) {
- h.localAbort(cause);
+ for (ClientLocalHistory h : histories.values()) {
+ h.localAbort(cause);
+ }
+ histories.clear();
+ } finally {
+ lock.unlockWrite(stamp);
}
- histories.clear();
}
private AbstractDataStoreClientBehavior shutdown(final ClientActorBehavior<ShardBackendInfo> currentBehavior) {
* involved, as the messages need to be replayed to the individual proxies.
*/
@Override
- @GuardedBy("connectionsLock")
- protected final ConnectedClientConnection<ShardBackendInfo> connectionUp(
- final AbstractClientConnection<ShardBackendInfo> conn, final ShardBackendInfo backend) {
+ protected final ConnectionConnectCohort connectionUp(final ConnectedClientConnection<ShardBackendInfo> newConn) {
+ // NOTE(review): the write lock deliberately outlives this method; it is released in
+ // finishReconnect() via the returned cohort. If startReconnect() throws, or the cohort is
+ // never invoked, the lock leaks — TODO confirm callers guarantee the cohort always runs.
+ final long stamp = lock.writeLock();
- // Step 0: create a new connected connection
- final ConnectedClientConnection<ShardBackendInfo> newConn = new ConnectedClientConnection<>(conn.context(),
- conn.cookie(), backend);
+ // Step 1: Freeze all AbstractProxyHistory instances pointing to that shard. This indirectly means that no
+ // further TransactionProxies can be created and we can safely traverse maps without risking
+ // missing an entry
+ final Collection<HistoryReconnectCohort> cohorts = new ArrayList<>();
+ startReconnect(singleHistory, newConn, cohorts);
+ for (ClientLocalHistory h : histories.values()) {
+ startReconnect(h, newConn, cohorts);
+ }
- LOG.debug("{}: resolving connection {} to {}", persistenceId(), conn, newConn);
+ return previousEntries -> finishReconnect(newConn, stamp, cohorts, previousEntries);
+ }
- final Collection<HistoryReconnectCohort> cohorts = new ArrayList<>();
+ // Second phase of connectionUp(): replays the previous connection's entries to the frozen
+ // cohorts, installs the bouncing forwarder, and finally releases the write lock taken in
+ // connectionUp() (the 'stamp' argument).
+ private ReconnectForwarder finishReconnect(final ConnectedClientConnection<ShardBackendInfo> newConn,
+ final long stamp, final Collection<HistoryReconnectCohort> cohorts,
+ final Collection<ConnectionEntry> previousEntries) {
try {
- // Step 1: Freeze all AbstractProxyHistory instances pointing to that shard. This indirectly means that no
- // further TransactionProxies can be created and we can safely traverse maps without risking
- // missing an entry
- startReconnect(singleHistory, newConn, cohorts);
- for (ClientLocalHistory h : histories.values()) {
- startReconnect(h, newConn, cohorts);
- }
-
// Step 2: Collect previous successful requests from the cohorts. We do not want to expose
// the non-throttling interface to the connection, hence we use a wrapper consumer
for (HistoryReconnectCohort c : cohorts) {
- c.replaySuccessfulRequests();
+ c.replayRequests(previousEntries);
}
// Step 3: Install a forwarder, which will forward requests back to affected cohorts. Any outstanding
- // requests will be immediately sent to it and requests being sent concurrently will get forwarded
- // once they hit the new connection.
- conn.setForwarder(BouncingReconnectForwarder.forCohorts(newConn, cohorts));
+ // requests will be immediately sent to it and requests being sent concurrently will get
+ // forwarded once they hit the new connection.
+ return BouncingReconnectForwarder.forCohorts(newConn, cohorts);
} finally {
- // Step 4: Complete switchover of the connection. The cohorts can resume normal operations.
- for (HistoryReconnectCohort c : cohorts) {
- c.close();
+ try {
+ // Step 4: Complete switchover of the connection. The cohorts can resume normal operations.
+ for (HistoryReconnectCohort c : cohorts) {
+ c.close();
+ }
+ } finally {
+ // Nested finally guarantees the lock is released even if a cohort's close() throws.
+ lock.unlockWrite(stamp);
}
}
-
- return newConn;
}
private static void startReconnect(final AbstractClientHistory history,
public final ClientLocalHistory createLocalHistory() {
final LocalHistoryIdentifier historyId = new LocalHistoryIdentifier(getIdentifier(),
nextHistoryId.getAndIncrement());
- final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
- LOG.debug("{}: creating a new local history {}", persistenceId(), history);
- Verify.verify(histories.put(historyId, history) == null);
+ // Read lock: many histories may be created concurrently; abortOperations() takes the write
+ // lock, so 'aborted' cannot flip to non-null while we hold this lock — checking it once up
+ // front replaces the old insert-then-check-and-cleanup dance.
+ final long stamp = lock.readLock();
+ try {
+ if (aborted != null) {
+ Throwables.throwIfUnchecked(aborted);
+ throw new RuntimeException(aborted);
+ }
- final Throwable a = aborted;
- if (a != null) {
- history.localAbort(a);
- histories.remove(historyId, history);
- throw Throwables.propagate(a);
- }
+ final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
+ LOG.debug("{}: creating a new local history {}", persistenceId(), history);
- return history;
+ Verify.verify(histories.put(historyId, history) == null);
+ return history;
+ } finally {
+ lock.unlockRead(stamp);
+ }
}
@Override
}
@Override
- public final void close() {
+ public final ClientSnapshot createSnapshot() {
+ // Snapshots are served by the implicit single history.
+ return singleHistory.takeSnapshot();
+ }
+
+ @Override
+ // NOTE(review): close() is no longer 'final' here — confirm subclasses are intended to
+ // override it (and that they call super.close()).
+ public void close() {
+ super.close();
context().executeInActor(this::shutdown);
}
- abstract Long resolveShardForPath(final YangInstanceIdentifier path);
+ // Resolves the shard cookie responsible for the given path — presumably null when the path
+ // cannot be resolved (yet); TODO confirm the null contract against implementations.
+ abstract Long resolveShardForPath(YangInstanceIdentifier path);
}