*/
package org.opendaylight.netvirt.elan.l2gw.ha.handlers;
-import static org.opendaylight.mdsal.binding.util.Datastore.CONFIGURATION;
-
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
-import javax.inject.Inject;
import javax.inject.Singleton;
-import org.opendaylight.infrautils.utils.concurrent.LoggingFutures;
-import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.util.Datastore;
import org.opendaylight.mdsal.binding.util.Datastore.Configuration;
import org.opendaylight.mdsal.binding.util.Datastore.Operational;
-import org.opendaylight.mdsal.binding.util.ManagedNewTransactionRunner;
-import org.opendaylight.mdsal.binding.util.ManagedNewTransactionRunnerImpl;
import org.opendaylight.mdsal.binding.util.TypedReadWriteTransaction;
+import org.opendaylight.netvirt.elan.l2gw.ha.BatchedTransaction;
import org.opendaylight.netvirt.elan.l2gw.ha.HwvtepHAUtil;
import org.opendaylight.netvirt.elan.l2gw.ha.listeners.HAJobScheduler;
import org.opendaylight.netvirt.elan.l2gw.ha.merge.GlobalAugmentationMerger;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.HwvtepGlobalRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.PhysicalSwitchAugmentation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.PhysicalSwitchAugmentationBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.hwvtep.global.attributes.Managers;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Singleton
-public class NodeCopier {
+// NodeCopier is now generic over the Datastore type D and implements the
+// INodeCopier contract, so the datastore (CONFIGURATION vs OPERATIONAL) is
+// carried in the type parameter instead of a DataBroker-backed runner.
+public class NodeCopier<D extends Datastore> implements INodeCopier<D> {
private static final Logger LOG = LoggerFactory.getLogger(NodeCopier.class);
- private final GlobalAugmentationMerger globalAugmentationMerger = GlobalAugmentationMerger.getInstance();
- private final PSAugmentationMerger psAugmentationMerger = PSAugmentationMerger.getInstance();
- private final GlobalNodeMerger globalNodeMerger = GlobalNodeMerger.getInstance();
- private final PSNodeMerger psNodeMerger = PSNodeMerger.getInstance();
- private final ManagedNewTransactionRunner txRunner;
-
- @Inject
- public NodeCopier(DataBroker db) {
- this.txRunner = new ManagedNewTransactionRunnerImpl(db);
- }
+// Singleton merger helpers used to combine child-node data into the HA node.
+// NOTE(review): these fields dropped their 'private final' modifiers in this
+// change (now package-private and mutable) — confirm the widened visibility
+// is intentional (e.g. for test injection) rather than accidental.
+ GlobalAugmentationMerger globalAugmentationMerger = GlobalAugmentationMerger.getInstance();
+ PSAugmentationMerger psAugmentationMerger = PSAugmentationMerger.getInstance();
+ GlobalNodeMerger globalNodeMerger = GlobalNodeMerger.getInstance();
+ PSNodeMerger psNodeMerger = PSNodeMerger.getInstance();
+ /**
+  * Copies the HA global node at srcPath onto dstPath inside the caller's
+  * transaction, delegating datastore-specific merging (operational vs
+  * configuration) to the global mergers. The method no longer declares the
+  * checked read exceptions: read failures are logged and treated as
+  * "no existing destination node".
+  */
+ @Override
public <D extends Datastore> void copyGlobalNode(Optional<Node> srcGlobalNodeOptional,
- InstanceIdentifier<Node> srcPath,
- InstanceIdentifier<Node> dstPath,
- Class<D> datastoreType,
- TypedReadWriteTransaction<D> tx)
- throws ExecutionException, InterruptedException {
- if (!srcGlobalNodeOptional.isPresent() && Configuration.class.equals(datastoreType)) {
- Futures.addCallback(tx.read(srcPath), new FutureCallback<Optional<Node>>() {
- @Override
- public void onSuccess(Optional<Node> nodeOptional) {
- HAJobScheduler.getInstance().submitJob(() -> LoggingFutures.addErrorLogging(
- txRunner.callWithNewReadWriteTransactionAndSubmit(datastoreType, tx -> {
- if (nodeOptional.isPresent()) {
- copyGlobalNode(nodeOptional, srcPath, dstPath, datastoreType, tx);
- } else {
- /*
- * In case the Parent HA Global Node is not present and Child HA node is present
- * It means that both the child are disconnected/removed hence the parent is
- * deleted.
- * @see org.opendaylight.netvirt.elan.l2gw.ha.listeners.HAOpNodeListener
- * OnGLobalNode() delete function
- * So we should delete the existing config child node as cleanup
- */
- HwvtepHAUtil.deleteNodeIfPresent(tx, dstPath);
- }
- }), LOG, "Failed to read source node {}", srcPath));
- }
+ InstanceIdentifier<Node> srcPath,
+ InstanceIdentifier<Node> dstPath,
+ Class<D> logicalDatastoreType,
+ TypedReadWriteTransaction<D> tx) {
- @Override
- public void onFailure(Throwable throwable) {
- }
- }, MoreExecutors.directExecutor());
- return;
- }
+ // NOTE(review): the absent-source early path (async re-read + cleanup
+ // delete) was removed; callers must now guarantee
+ // srcGlobalNodeOptional.isPresent(), otherwise .get() below throws.
HwvtepGlobalAugmentation srcGlobalAugmentation =
srcGlobalNodeOptional.get().augmentation(HwvtepGlobalAugmentation.class);
if (srcGlobalAugmentation == null) {
- /*
- * If Source HA Global Node is not present
- * It means that both the child are disconnected/removed hence the parent is deleted.
- * @see org.opendaylight.netvirt.elan.l2gw.ha.listeners.HAOpNodeListener OnGLobalNode() delete function
- * So we should delete the existing config child node as cleanup
- */
- HwvtepHAUtil.deleteNodeIfPresent(tx, dstPath);
- return;
+ // Source node exists but carries no HwvtepGlobalAugmentation yet.
+ if (Configuration.class.equals(logicalDatastoreType)) {
+ // Write a skeleton node carrying only the node-id.
+ // NOTE(review): this puts at srcPath, while the analogous PS-node
+ // branch writes the skeleton at the *destination* path — confirm
+ // srcPath is really intended here.
+ tx.put(srcPath, new NodeBuilder().setNodeId(srcPath
+ .firstKeyOf(Node.class).getNodeId()).build());
+ return;
+ }
+ else {
+ LOG.error("Operational child node information is not present");
+ return;
+ }
}
NodeBuilder haNodeBuilder = HwvtepHAUtil.getNodeBuilderForPath(dstPath);
HwvtepGlobalAugmentationBuilder haBuilder = new HwvtepGlobalAugmentationBuilder();
-
- Optional<Node> existingDstGlobalNodeOptional = tx.read(dstPath).get();
- Node existingDstGlobalNode =
- existingDstGlobalNodeOptional.isPresent() ? existingDstGlobalNodeOptional.get() : null;
- HwvtepGlobalAugmentation existingHAGlobalData = HwvtepHAUtil.getGlobalAugmentationOfNode(existingDstGlobalNode);
-
-
- if (Operational.class.equals(datastoreType)) {
- globalAugmentationMerger.mergeOperationalData(haBuilder, existingHAGlobalData, srcGlobalAugmentation,
- dstPath);
+ // Read any existing destination node so the mergers can diff against it.
+ // NOTE(review): a failed read is logged and then treated as "destination
+ // absent", which may overwrite existing data — confirm this is acceptable.
+ // NOTE(review): InterruptedException is swallowed without re-interrupting
+ // the thread; consider Thread.currentThread().interrupt().
+ Optional<Node> existingDstGlobalNodeOptional = Optional.empty();
+ try {
+ existingDstGlobalNodeOptional = tx.read(dstPath).get();
+ } catch (ExecutionException | InterruptedException e) {
+ LOG.error("READ Failed for {} during copyGlobalNode", dstPath);
+ }
+ Node existingDstGlobalNode = existingDstGlobalNodeOptional.isPresent()
+ ? existingDstGlobalNodeOptional.get() : null;
+ HwvtepGlobalAugmentation existingHAGlobalData = HwvtepHAUtil
+ .getGlobalAugmentationOfNode(existingDstGlobalNode);
+ if (Operational.class.equals(logicalDatastoreType)) {
+ globalAugmentationMerger.mergeOperationalData(
+ haBuilder, existingHAGlobalData, srcGlobalAugmentation, dstPath);
globalNodeMerger.mergeOperationalData(haNodeBuilder,
existingDstGlobalNode, srcGlobalNodeOptional.get(), dstPath);
+
+ // Manager list is rebuilt from the source node plus whatever already
+ // exists at the destination. NOTE(review): the follow-up write of the
+ // managers into the CONFIGURATION datastore was removed — confirm the
+ // cluster-reboot scenario it covered is handled elsewhere.
haBuilder.setManagers(HwvtepHAUtil.buildManagersForHANode(srcGlobalNodeOptional.get(),
existingDstGlobalNodeOptional));
- //Also update the manager section in config which helps in cluster reboot scenarios
- LoggingFutures.addErrorLogging(
- txRunner.callWithNewWriteOnlyTransactionAndSubmit(CONFIGURATION,
- confTx -> haBuilder.getManagers().values().forEach(manager -> {
- InstanceIdentifier<Managers> managerIid =
- dstPath.augmentation(HwvtepGlobalAugmentation.class).child(Managers.class, manager.key());
- confTx.mergeParentStructurePut(managerIid, manager);
- })), LOG, "Error updating the manager section in config");
} else {
globalAugmentationMerger.mergeConfigData(haBuilder, srcGlobalAugmentation, dstPath);
}
haBuilder.setDbVersion(srcGlobalAugmentation.getDbVersion());
- haNodeBuilder.addAugmentation(HwvtepGlobalAugmentation.class, haBuilder.build());
+ // addAugmentation now infers the augmentation class from the built object.
+ haNodeBuilder.addAugmentation(haBuilder.build());
Node haNode = haNodeBuilder.build();
- if (Operational.class.equals(datastoreType)) {
+ if (Operational.class.equals(logicalDatastoreType)) {
tx.mergeParentStructureMerge(dstPath, haNode);
} else {
tx.mergeParentStructurePut(dstPath, haNode);
}
}
+
+ /**
+  * Copies a physical-switch node from srcPsPath to dstPsPath, merging
+  * against any existing destination node. When the source is absent in the
+  * CONFIGURATION case, the source is re-read asynchronously and the copy is
+  * retried from the HA job scheduler on a fresh BatchedTransaction.
+  */
public <D extends Datastore> void copyPSNode(Optional<Node> srcPsNodeOptional,
InstanceIdentifier<Node> srcPsPath,
InstanceIdentifier<Node> dstPsPath,
InstanceIdentifier<Node> dstGlobalPath,
- Class<D> datastoreType,
- TypedReadWriteTransaction<D> tx)
- throws ExecutionException, InterruptedException {
- if (!srcPsNodeOptional.isPresent() && Configuration.class.equals(datastoreType)) {
+ Class<D> logicalDatastoreType,
+ TypedReadWriteTransaction<D> tx) {
+ if (!srcPsNodeOptional.isPresent() && Configuration.class.equals(logicalDatastoreType)) {
Futures.addCallback(tx.read(srcPsPath), new FutureCallback<Optional<Node>>() {
@Override
public void onSuccess(Optional<Node> nodeOptional) {
HAJobScheduler.getInstance().submitJob(() -> {
- LoggingFutures.addErrorLogging(
- txRunner.callWithNewReadWriteTransactionAndSubmit(datastoreType, tx -> {
- if (nodeOptional.isPresent()) {
- copyPSNode(nodeOptional,
- srcPsPath, dstPsPath, dstGlobalPath, datastoreType, tx);
- } else {
- /*
- * Deleting node please refer @see #copyGlobalNode for explanation
- */
- HwvtepHAUtil.deleteNodeIfPresent(tx, dstPsPath);
- }
- }), LOG, "Failed to read source node {}", srcPsPath);
+ // Retry on a new BatchedTransaction; the managed-transaction
+ // runner is gone. NOTE(review): no explicit submit/close of tx1
+ // is visible — presumably BatchedTransaction submits itself;
+ // confirm, or these writes are lost.
+ TypedReadWriteTransaction<D> tx1 = new BatchedTransaction(
+ logicalDatastoreType);
+ if (nodeOptional.isPresent()) {
+ copyPSNode(nodeOptional,
+ srcPsPath, dstPsPath, dstGlobalPath, logicalDatastoreType, tx1);
+ } else {
+ // NOTE(review): the removed code *deleted* the stale config
+ // node here; the new code writes a skeleton node instead —
+ // confirm the cleanup semantics are intentional.
+ tx1.put(dstPsPath, new NodeBuilder().setNodeId(dstPsPath
+ .firstKeyOf(Node.class).getNodeId()).build());
+ }
+
});
}
+ // NOTE(review): the callback's onFailure handler and the early return
+ // after scheduling are not visible in this hunk; without a return the
+ // code below would call srcPsNodeOptional.get() on an absent Optional
+ // — verify the surrounding context lines.
PhysicalSwitchAugmentation srcPsAugmenatation =
srcPsNodeOptional.get().augmentation(PhysicalSwitchAugmentation.class);
-
- Node existingDstPsNode = tx.read(dstPsPath).get().orElse(null);
+ // NOTE(review): read failure (including a swallowed interrupt, no
+ // re-interrupt) is treated as "destination absent" — confirm acceptable.
+ Node existingDstPsNode = null;
+ try {
+ existingDstPsNode = HwvtepHAUtil.readNode(tx, dstPsPath);
+ } catch (ExecutionException | InterruptedException e) {
+ LOG.error("NodeCopier Read Failed for Node:{}", dstPsPath);
+ }
PhysicalSwitchAugmentation existingDstPsAugmentation =
HwvtepHAUtil.getPhysicalSwitchAugmentationOfNode(existingDstPsNode);
- if (Operational.class.equals(datastoreType)) {
+ // NOTE(review): managed-by attributes are now merged *before* the
+ // datastore-specific merges (they previously ran after both branches) —
+ // confirm the reordering does not change the merged result. The builders
+ // dstPsAugmentationBuilder/dstPsNodeBuilder are declared in context lines
+ // not visible in this hunk.
+ mergeOpManagedByAttributes(srcPsAugmenatation, dstPsAugmentationBuilder, dstGlobalPath);
+ if (Operational.class.equals(logicalDatastoreType)) {
psAugmentationMerger.mergeOperationalData(dstPsAugmentationBuilder, existingDstPsAugmentation,
srcPsAugmenatation, dstPsPath);
psNodeMerger.mergeOperationalData(dstPsNodeBuilder, existingDstPsNode, srcPsNodeOptional.get(), dstPsPath);
+ dstPsNodeBuilder.addAugmentation(dstPsAugmentationBuilder.build());
+ Node dstPsNode = dstPsNodeBuilder.build();
+ tx.mergeParentStructureMerge(dstPsPath, dstPsNode);
} else {
+ /* The change below reduces the size of the tx.put() generated here:
+ 1. Check whether the child node already exists in config-topo.
+ 2. If absent, construct the child ps-node with augmentation data only and
+ tx.put(node), followed by tx.put(termination-point) for each
+ termination-point present in the parent ps-node.
+ 3. If present, construct the augmentation data and tx.put(augmentation),
+ followed by tx.put(termination-point) for each termination-point
+ present in the parent ps-node.
+ */
+ String dstNodeName = dstPsNodeBuilder.getNodeId().getValue();
psAugmentationMerger.mergeConfigData(dstPsAugmentationBuilder, srcPsAugmenatation, dstPsPath);
+ // Existence check decides between a full-node put and an
+ // augmentation-only put.
+ try {
+ boolean isEntryExists = tx.exists(dstPsPath).get();
+ if (isEntryExists) {
+ LOG.info("Destination PS Node: {} already exists in config-topo.", dstNodeName);
+ InstanceIdentifier<PhysicalSwitchAugmentation> dstPsAugPath =
+ dstPsPath.augmentation(PhysicalSwitchAugmentation.class);
+ tx.put(dstPsAugPath, dstPsAugmentationBuilder.build());
+ } else {
+ LOG.info("Destination PS Node: {} doesn't still exist in config-topo.",
+ dstNodeName);
+ dstPsNodeBuilder.addAugmentation(dstPsAugmentationBuilder.build());
+ Node dstPsNode = dstPsNodeBuilder.build();
+ tx.put(dstPsPath, dstPsNode);
+ }
+ // NOTE(review): if exists() fails, the node/augmentation put is
+ // skipped entirely yet the termination-point puts below still run;
+ // InterruptedException is also swallowed without re-interrupting.
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.error("Error While checking Existing on Node {} in config-topo", dstPsPath);
+ }
psNodeMerger.mergeConfigData(dstPsNodeBuilder, srcPsNodeOptional.get(), dstPsPath);
- }
- mergeOpManagedByAttributes(srcPsAugmenatation, dstPsAugmentationBuilder, dstGlobalPath);
- dstPsNodeBuilder.addAugmentation(PhysicalSwitchAugmentation.class, dstPsAugmentationBuilder.build());
- Node dstPsNode = dstPsNodeBuilder.build();
- tx.mergeParentStructureMerge(dstPsPath, dstPsNode);
- LOG.debug("Copied {} physical switch node from {} to {}", datastoreType, srcPsPath, dstPsPath);
+ // Push each termination point individually to keep writes small.
+ if (dstPsNodeBuilder.getTerminationPoint() != null) {
+ dstPsNodeBuilder.getTerminationPoint().values().forEach(terminationPoint -> {
+ InstanceIdentifier<TerminationPoint> terminationPointPath =
+ dstPsPath.child(TerminationPoint.class, terminationPoint.key());
+ tx.put(terminationPointPath, terminationPoint);
+ LOG.trace("Destination PS Node: {} updated with termination-point : {}",
+ dstNodeName, terminationPoint.key());
+ });
+ }
+ }
+ LOG.debug("Copied {} physical switch node from {} to {}", logicalDatastoreType, srcPsPath, dstPsPath);
}
public void mergeOpManagedByAttributes(PhysicalSwitchAugmentation psAugmentation,