import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
+import java.util.concurrent.locks.StampedLock;
import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
import org.opendaylight.controller.cluster.access.client.ClientActorBehavior;
import org.opendaylight.controller.cluster.access.client.ClientActorContext;
+ // Live local histories; iterated/cleared by abortOperations() and connectionUp().
private final Map<LocalHistoryIdentifier, ClientLocalHistory> histories = new ConcurrentHashMap<>();
private final AtomicLong nextHistoryId = new AtomicLong(1);
+ // Read lock: history creation. Write lock: abort and reconnect, which must
+ // exclude concurrent history creation.
+ private final StampedLock lock = new StampedLock();
private final SingleClientHistory singleHistory;
+ // Non-null once this behavior has failed; acts as a barrier (see abortOperations()).
private volatile Throwable aborted;
}
+ // Fails this client: publishes the abort cause and locally aborts every known
+ // history. Runs under the write lock, so it cannot interleave with
+ // createLocalHistory(), which registers histories under the read lock.
private void abortOperations(final Throwable cause) {
- // This acts as a barrier, application threads check this after they have added an entry in the maps,
- // and if they observe aborted being non-null, they will perform their cleanup and not return the handle.
- aborted = cause;
+ final long stamp = lock.writeLock();
+ try {
+ // This acts as a barrier, application threads check this after they have added an entry in the maps,
+ // and if they observe aborted being non-null, they will perform their cleanup and not return the handle.
+ aborted = cause;
- for (ClientLocalHistory h : histories.values()) {
- h.localAbort(cause);
+ for (ClientLocalHistory h : histories.values()) {
+ h.localAbort(cause);
+ }
+ histories.clear();
+ } finally {
+ lock.unlockWrite(stamp);
}
- histories.clear();
}
private AbstractDataStoreClientBehavior shutdown(final ClientActorBehavior<ShardBackendInfo> currentBehavior) {
* involved, as the messages need to be replayed to the individual proxies.
*/
@Override
- @GuardedBy("connectionsLock")
- protected final ConnectedClientConnection<ShardBackendInfo> connectionUp(
- final AbstractClientConnection<ShardBackendInfo> conn, final ShardBackendInfo backend) {
-
- // Step 0: create a new connected connection
- final ConnectedClientConnection<ShardBackendInfo> newConn = new ConnectedClientConnection<>(conn.context(),
- conn.cookie(), backend);
-
- LOG.debug("{}: resolving connection {} to {}", persistenceId(), conn, newConn);
+ protected final ConnectedClientConnection<ShardBackendInfo> connectionUp -- was replaced; new shape below
+ protected final ConnectionConnectCohort connectionUp(final ConnectedClientConnection<ShardBackendInfo> newConn) {
+ // The write lock is acquired here and only released in the returned cohort's
+ // finally block, excluding createLocalHistory() for the entire reconnect.
+ final long stamp = lock.writeLock();
+ // NOTE(review): if startReconnect() below throws before the lambda is
+ // returned, this stamp is never unlocked -- consider wrapping the
+ // startReconnect calls in a try/catch that unlocks and rethrows.
+ // TODO confirm startReconnect cannot throw.
+ // Step 1: Freeze all AbstractProxyHistory instances pointing to that shard. This indirectly means that no
+ // further TransactionProxies can be created and we can safely traverse maps without risking
+ // missing an entry
final Collection<HistoryReconnectCohort> cohorts = new ArrayList<>();
- try {
- // Step 1: Freeze all AbstractProxyHistory instances pointing to that shard. This indirectly means that no
- // further TransactionProxies can be created and we can safely traverse maps without risking
- // missing an entry
- startReconnect(singleHistory, newConn, cohorts);
- for (ClientLocalHistory h : histories.values()) {
- startReconnect(h, newConn, cohorts);
- }
-
- // Step 2: Collect previous successful requests from the cohorts. We do not want to expose
- // the non-throttling interface to the connection, hence we use a wrapper consumer
- for (HistoryReconnectCohort c : cohorts) {
- c.replaySuccessfulRequests();
- }
-
- // Step 3: Install a forwarder, which will forward requests back to affected cohorts. Any outstanding
- // requests will be immediately sent to it and requests being sent concurrently will get forwarded
- // once they hit the new connection.
- conn.setForwarder(BouncingReconnectForwarder.forCohorts(newConn, cohorts));
- } finally {
- // Step 4: Complete switchover of the connection. The cohorts can resume normal operations.
- for (HistoryReconnectCohort c : cohorts) {
- c.close();
- }
+ startReconnect(singleHistory, newConn, cohorts);
+ for (ClientLocalHistory h : histories.values()) {
+ startReconnect(h, newConn, cohorts);
}
- return newConn;
+ // Deferred completion: the caller invokes this cohort once the previous
+ // connection's outstanding entries are available for replay.
+ return previousEntries -> {
+ try {
+ // Step 2: Collect previous successful requests from the cohorts. We do not want to expose
+ // the non-throttling interface to the connection, hence we use a wrapper consumer
+ for (HistoryReconnectCohort c : cohorts) {
+ c.replaySuccessfulRequests(previousEntries);
+ }
+
+ // Step 3: Install a forwarder, which will forward requests back to affected cohorts. Any outstanding
+ // requests will be immediately sent to it and requests being sent concurrently will get
+ // forwarded once they hit the new connection.
+ return BouncingReconnectForwarder.forCohorts(newConn, cohorts);
+ } finally {
+ try {
+ // Step 4: Complete switchover of the connection. The cohorts can resume normal operations.
+ for (HistoryReconnectCohort c : cohorts) {
+ c.close();
+ }
+ } finally {
+ lock.unlockWrite(stamp);
+ }
+ }
+ };
}
private static void startReconnect(final AbstractClientHistory history,
public final ClientLocalHistory createLocalHistory() {
final LocalHistoryIdentifier historyId = new LocalHistoryIdentifier(getIdentifier(),
nextHistoryId.getAndIncrement());
- final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
- LOG.debug("{}: creating a new local history {}", persistenceId(), history);
- Verify.verify(histories.put(historyId, history) == null);
+ // Read lock: multiple creators may run concurrently, but never concurrently
+ // with abortOperations()/connectionUp(), which hold the write lock.
+ final long stamp = lock.readLock();
+ try {
+ if (aborted != null) {
+ // Throwables.propagate() is deprecated; rethrow with equivalent
+ // semantics: unchecked causes as-is, checked causes wrapped.
+ Throwables.throwIfUnchecked(aborted);
+ throw new RuntimeException(aborted);
+ }
+
+ final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
+ LOG.debug("{}: creating a new local history {}", persistenceId(), history);
- final Throwable a = aborted;
- if (a != null) {
- history.localAbort(a);
- histories.remove(historyId, history);
- throw Throwables.propagate(a);
+ Verify.verify(histories.put(historyId, history) == null);
+ return history;
+ } finally {
+ lock.unlockRead(stamp);
}
-
- return history;
}
@Override
return singleHistory.createTransaction();
}
+ @Override
+ public final ClientSnapshot createSnapshot() {
+ // Snapshots are served from the single (implicit) history.
+ return singleHistory.doCreateSnapshot();
+ }
+
@Override
public final void close() {
context().executeInActor(this::shutdown);