Bulk merge of l2gw changes
[netvirt.git] / elanmanager / impl / src / main / java / org / opendaylight / netvirt / elan / l2gw / ha / handlers / NodeCopier.java
index 629b67a0818bb72918af7e97c4c39946be2c19c8..7fcaac279833b7e1d9ba7b73b966a8868b75eba9 100644 (file)
@@ -7,18 +7,16 @@
  */
 package org.opendaylight.netvirt.elan.l2gw.ha.handlers;
 
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
-import com.google.common.base.Optional;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
-import javax.inject.Inject;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
 import javax.inject.Singleton;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.mdsal.binding.util.Datastore;
+import org.opendaylight.mdsal.binding.util.Datastore.Configuration;
+import org.opendaylight.mdsal.binding.util.Datastore.Operational;
+import org.opendaylight.mdsal.binding.util.TypedReadWriteTransaction;
 import org.opendaylight.netvirt.elan.l2gw.ha.BatchedTransaction;
 import org.opendaylight.netvirt.elan.l2gw.ha.HwvtepHAUtil;
 import org.opendaylight.netvirt.elan.l2gw.ha.listeners.HAJobScheduler;
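
The import swap above tracks this patch's two API migrations: Guava's Optional gives way to java.util.Optional, and the controller md.sal ReadWriteTransaction, with its checked ReadFailedException, gives way to the mdsal binding-util TypedReadWriteTransaction, which encodes the target datastore (Configuration or Operational) in a type parameter. A minimal before/after sketch of the read pattern, assuming a ReadWriteTransaction "oldTx" and a TypedReadWriteTransaction<Operational> "newTx" (placeholder names, not from this patch):

    // Old controller md.sal pattern: datastore named on every call, checked future.
    com.google.common.base.Optional<Node> before =
            oldTx.read(LogicalDatastoreType.OPERATIONAL, path).checkedGet();

    // New mdsal binding-util pattern: the datastore is fixed by the transaction's
    // type parameter and the read returns a future of java.util.Optional.
    java.util.Optional<Node> after = newTx.read(path).get();
    // get() can throw ExecutionException or InterruptedException, which is why the
    // rewritten methods below catch those instead of ReadFailedException.
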
@@ -31,145 +29,108 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hw
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.HwvtepGlobalRef;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.PhysicalSwitchAugmentation;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.PhysicalSwitchAugmentationBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.hwvtep.rev150901.hwvtep.global.attributes.Managers;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @Singleton
-public class NodeCopier implements INodeCopier {
+public class NodeCopier<D extends Datastore> implements INodeCopier<D> {
 
     private static final Logger LOG = LoggerFactory.getLogger(NodeCopier.class);
 
-    private final GlobalAugmentationMerger globalAugmentationMerger = GlobalAugmentationMerger.getInstance();
-    private final PSAugmentationMerger psAugmentationMerger = PSAugmentationMerger.getInstance();
-    private final GlobalNodeMerger globalNodeMerger = GlobalNodeMerger.getInstance();
-    private final PSNodeMerger psNodeMerger = PSNodeMerger.getInstance();
-    private final DataBroker db;
-
-    @Inject
-    public NodeCopier(DataBroker db) {
-        this.db = db;
-    }
+    private final GlobalAugmentationMerger globalAugmentationMerger = GlobalAugmentationMerger.getInstance();
+    private final PSAugmentationMerger psAugmentationMerger = PSAugmentationMerger.getInstance();
+    private final GlobalNodeMerger globalNodeMerger = GlobalNodeMerger.getInstance();
+    private final PSNodeMerger psNodeMerger = PSNodeMerger.getInstance();
 
     @Override
-    public void copyGlobalNode(Optional<Node> srcGlobalNodeOptional,
-                               InstanceIdentifier<Node> srcPath,
-                               InstanceIdentifier<Node> dstPath,
-                               LogicalDatastoreType logicalDatastoreType,
-                               ReadWriteTransaction tx) throws ReadFailedException {
-        if (!srcGlobalNodeOptional.isPresent() && logicalDatastoreType == CONFIGURATION) {
-            Futures.addCallback(tx.read(logicalDatastoreType, srcPath), new FutureCallback<Optional<Node>>() {
-                @Override
-                public void onSuccess(Optional<Node> nodeOptional) {
-                    HAJobScheduler.getInstance().submitJob(() -> {
-                        try {
-                            ReadWriteTransaction tx1 = new BatchedTransaction();
-                            if (nodeOptional.isPresent()) {
-                                copyGlobalNode(nodeOptional, srcPath, dstPath, logicalDatastoreType, tx1);
-                            } else {
-                                /**
-                                 * In case the Parent HA Global Node is not present and Child HA node is present
-                                 * It means that both the child are disconnected/removed hence the parent is deleted.
-                                 * @see org.opendaylight.netvirt.elan.l2gw.ha.listeners.HAOpNodeListener
-                                 * OnGLobalNode() delete function
-                                 * So we should delete the existing config child node as cleanup
-                                 */
-                                HwvtepHAUtil.deleteNodeIfPresent(tx1, logicalDatastoreType, dstPath);
-                            }
-                        } catch (ReadFailedException e) {
-                            LOG.error("Failed to read source node {}",srcPath);
-                        }
-                    });
-                }
+    public <D extends Datastore> void copyGlobalNode(Optional<Node> srcGlobalNodeOptional,
+                        InstanceIdentifier<Node> srcPath,
+                        InstanceIdentifier<Node> dstPath,
+                        Class<D> logicalDatastoreType,
+                        TypedReadWriteTransaction<D> tx) {
 
-                @Override
-                public void onFailure(Throwable throwable) {
-                }
-            });
-            return;
-        }
         HwvtepGlobalAugmentation srcGlobalAugmentation =
                 srcGlobalNodeOptional.get().augmentation(HwvtepGlobalAugmentation.class);
         if (srcGlobalAugmentation == null) {
-            /**
-             * If Source HA Global Node is not present
-             * It means that both the child are disconnected/removed hence the parent is deleted.
-             * @see org.opendaylight.netvirt.elan.l2gw.ha.listeners.HAOpNodeListener OnGLobalNode() delete function
-             * So we should delete the existing config child node as cleanup
-             */
-            HwvtepHAUtil.deleteNodeIfPresent(tx, logicalDatastoreType, dstPath);
-            return;
+            if (Configuration.class.equals(logicalDatastoreType)) {
+                tx.put(srcPath, new NodeBuilder()
+                        .setNodeId(srcPath.firstKeyOf(Node.class).getNodeId())
+                        .build());
+                return;
+            } else {
+                LOG.error("Operational child node information is not present");
+                return;
+            }
         }
         NodeBuilder haNodeBuilder = HwvtepHAUtil.getNodeBuilderForPath(dstPath);
         HwvtepGlobalAugmentationBuilder haBuilder = new HwvtepGlobalAugmentationBuilder();
+        Optional<Node> existingDstGlobalNodeOptional = Optional.empty();
+        try {
+            existingDstGlobalNodeOptional = tx.read(dstPath).get();
+        } catch (ExecutionException | InterruptedException e) {
+            LOG.error("READ Failed for {} during copyGlobalNode", dstPath);
+        }
+        Node existingDstGlobalNode =
+            existingDstGlobalNodeOptional.orElse(null);
+        HwvtepGlobalAugmentation existingHAGlobalData = HwvtepHAUtil
+            .getGlobalAugmentationOfNode(existingDstGlobalNode);
+        if (Operational.class.equals(logicalDatastoreType)) {
+            globalAugmentationMerger.mergeOperationalData(
+                    haBuilder, existingHAGlobalData, srcGlobalAugmentation, dstPath);
+            globalNodeMerger.mergeOperationalData(haNodeBuilder,
+                    existingDstGlobalNode, srcGlobalNodeOptional.get(), dstPath);
 
-        Optional<Node> existingDstGlobalNodeOptional = tx.read(logicalDatastoreType, dstPath).checkedGet();
-        Node existingDstGlobalNode =
-                existingDstGlobalNodeOptional.isPresent() ? existingDstGlobalNodeOptional.get() : null;
-        HwvtepGlobalAugmentation existingHAGlobalData = HwvtepHAUtil.getGlobalAugmentationOfNode(existingDstGlobalNode);
-
-        globalAugmentationMerger.mergeOperationalData(haBuilder, existingHAGlobalData, srcGlobalAugmentation, dstPath);
-        globalNodeMerger.mergeOperationalData(haNodeBuilder,
-                existingDstGlobalNode, srcGlobalNodeOptional.get(), dstPath);
-
-
-        if (OPERATIONAL == logicalDatastoreType) {
             haBuilder.setManagers(HwvtepHAUtil.buildManagersForHANode(srcGlobalNodeOptional.get(),
                     existingDstGlobalNodeOptional));
-            //Also update the manager section in config which helps in cluster reboot scenarios
-            haBuilder.getManagers().forEach((manager) -> {
-                InstanceIdentifier<Managers> managerIid = dstPath.augmentation(HwvtepGlobalAugmentation.class)
-                        .child(Managers.class, manager.key());
-                tx.put(CONFIGURATION, managerIid, manager, true);
-            });
 
+        } else {
+            globalAugmentationMerger.mergeConfigData(haBuilder, srcGlobalAugmentation, dstPath);
+            globalNodeMerger.mergeConfigData(haNodeBuilder, srcGlobalNodeOptional.get(), dstPath);
         }
+
         haBuilder.setDbVersion(srcGlobalAugmentation.getDbVersion());
-        haNodeBuilder.addAugmentation(HwvtepGlobalAugmentation.class, haBuilder.build());
+        haNodeBuilder.addAugmentation(haBuilder.build());
         Node haNode = haNodeBuilder.build();
-        if (OPERATIONAL == logicalDatastoreType) {
-            tx.merge(logicalDatastoreType, dstPath, haNode, true);
+        if (Operational.class.equals(logicalDatastoreType)) {
+            tx.mergeParentStructureMerge(dstPath, haNode);
         } else {
-            tx.put(logicalDatastoreType, dstPath, haNode, true);
+            tx.mergeParentStructurePut(dstPath, haNode);
         }
     }
 
-    @Override
-    public void copyPSNode(Optional<Node> srcPsNodeOptional,
+
+    public <D extends Datastore> void copyPSNode(Optional<Node> srcPsNodeOptional,
                            InstanceIdentifier<Node> srcPsPath,
                            InstanceIdentifier<Node> dstPsPath,
                            InstanceIdentifier<Node> dstGlobalPath,
-                           LogicalDatastoreType logicalDatastoreType,
-                           ReadWriteTransaction tx) throws ReadFailedException {
-        if (!srcPsNodeOptional.isPresent() && logicalDatastoreType == CONFIGURATION) {
-            Futures.addCallback(tx.read(logicalDatastoreType, srcPsPath), new FutureCallback<Optional<Node>>() {
+                           Class<D> logicalDatastoreType,
+                           TypedReadWriteTransaction<D> tx) {
+        if (!srcPsNodeOptional.isPresent() && Configuration.class.equals(logicalDatastoreType)) {
+            Futures.addCallback(tx.read(srcPsPath), new FutureCallback<Optional<Node>>() {
                 @Override
                 public void onSuccess(Optional<Node> nodeOptional) {
                     HAJobScheduler.getInstance().submitJob(() -> {
-                        try {
-                            ReadWriteTransaction tx1 = new BatchedTransaction();
-                            if (nodeOptional.isPresent()) {
-                                copyPSNode(nodeOptional,
-                                        srcPsPath, dstPsPath, dstGlobalPath, logicalDatastoreType, tx1);
-                            } else {
-                                /**
-                                 * Deleting node please refer @see #copyGlobalNode for explanation
-                                 */
-                                HwvtepHAUtil.deleteNodeIfPresent(tx1, logicalDatastoreType, dstPsPath);
-                            }
-                        } catch (ReadFailedException e) {
-                            LOG.error("Failed to read src node {}", srcPsNodeOptional.get());
+                        TypedReadWriteTransaction<D> tx1 = new BatchedTransaction(
+                            logicalDatastoreType);
+                        if (nodeOptional.isPresent()) {
+                            copyPSNode(nodeOptional,
+                                srcPsPath, dstPsPath, dstGlobalPath, logicalDatastoreType, tx1);
+                        } else {
+                            tx1.put(dstPsPath, new NodeBuilder().setNodeId(dstPsPath
+                                .firstKeyOf(Node.class).getNodeId()).build());
                         }
+
                     });
                 }
 
                 @Override
                 public void onFailure(Throwable throwable) {
                 }
-            });
+            }, MoreExecutors.directExecutor());
             return;
         }
         NodeBuilder dstPsNodeBuilder = HwvtepHAUtil.getNodeBuilderForPath(dstPsPath);
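
Two more mechanical migrations show up in the hunk above. First, tx.merge(type, path, data, true) and tx.put(type, path, data, true) become mergeParentStructureMerge(path, data) and mergeParentStructurePut(path, data): the boolean createMissingParents flag is folded into the method name. Second, newer Guava requires Futures.addCallback to name its executor explicitly, hence the added MoreExecutors.directExecutor(); a direct executor runs the callback on whichever thread completes the future, which is acceptable here because onSuccess only re-queues the real work onto the HAJobScheduler. A hedged sketch of that callback shape (the handler body is illustrative, not the patch's code):

    Futures.addCallback(tx.read(srcPsPath), new FutureCallback<Optional<Node>>() {
        @Override
        public void onSuccess(Optional<Node> node) {
            // hand the result off to the HA job scheduler rather than work inline
            HAJobScheduler.getInstance().submitJob(() -> handle(node));
        }

        @Override
        public void onFailure(Throwable throwable) {
            // unlike the patch, this sketch logs the failure instead of ignoring it
            LOG.error("Read of {} failed", srcPsPath, throwable);
        }
    }, MoreExecutors.directExecutor());

where handle(...) stands in for the copy-or-cleanup logic.
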
@@ -177,23 +138,61 @@ public class NodeCopier implements INodeCopier {
 
         PhysicalSwitchAugmentation srcPsAugmenatation =
                 srcPsNodeOptional.get().augmentation(PhysicalSwitchAugmentation.class);
-
-        Node existingDstPsNode = HwvtepHAUtil.readNode(tx, logicalDatastoreType, dstPsPath);
+        Node existingDstPsNode = null;
+        try {
+            existingDstPsNode = HwvtepHAUtil.readNode(tx, dstPsPath);
+        } catch (ExecutionException | InterruptedException e) {
+            LOG.error("NodeCopier Read Failed for Node:{}", dstPsPath);
+        }
         PhysicalSwitchAugmentation existingDstPsAugmentation =
                 HwvtepHAUtil.getPhysicalSwitchAugmentationOfNode(existingDstPsNode);
-        if (OPERATIONAL == logicalDatastoreType) {
+        mergeOpManagedByAttributes(srcPsAugmenatation, dstPsAugmentationBuilder, dstGlobalPath);
+        if (Operational.class.equals(logicalDatastoreType)) {
             psAugmentationMerger.mergeOperationalData(dstPsAugmentationBuilder, existingDstPsAugmentation,
                     srcPsAugmenatation, dstPsPath);
             psNodeMerger.mergeOperationalData(dstPsNodeBuilder, existingDstPsNode, srcPsNodeOptional.get(), dstPsPath);
+            dstPsNodeBuilder.addAugmentation(dstPsAugmentationBuilder.build());
+            Node dstPsNode = dstPsNodeBuilder.build();
+            tx.mergeParentStructureMerge(dstPsPath, dstPsNode);
         } else {
+            /* The change below reduces the size of the tx.put() generated here.
+            1. Check whether the child node already exists in config-topo.
+            2. If not present, construct the child ps-node with augmentation data only and do tx.put(node),
+            followed by tx.put(termination-point) for each termination point present in the parent ps-node.
+            3. If present, construct the augmentation data and do tx.put(augmentation),
+            followed by tx.put(termination-point) for each termination point present in the parent ps-node.
+             */
+            String dstNodeName = dstPsNodeBuilder.getNodeId().getValue();
             psAugmentationMerger.mergeConfigData(dstPsAugmentationBuilder, srcPsAugmenatation, dstPsPath);
+            try {
+                boolean entryExists = tx.exists(dstPsPath).get();
+                if (entryExists) {
+                    LOG.info("Destination PS Node: {} already exists in config-topo.", dstNodeName);
+                    InstanceIdentifier<PhysicalSwitchAugmentation> dstPsAugPath =
+                        dstPsPath.augmentation(PhysicalSwitchAugmentation.class);
+                    tx.put(dstPsAugPath, dstPsAugmentationBuilder.build());
+                } else {
+                    LOG.info("Destination PS Node: {} doesn't still exist in config-topo.",
+                        dstNodeName);
+                    dstPsNodeBuilder.addAugmentation(dstPsAugmentationBuilder.build());
+                    Node dstPsNode = dstPsNodeBuilder.build();
+                    tx.put(dstPsPath, dstPsNode);
+                }
+            } catch (InterruptedException | ExecutionException e) {
+                LOG.error("Error While checking Existing on Node {} in config-topo", dstPsPath);
+            }
             psNodeMerger.mergeConfigData(dstPsNodeBuilder, srcPsNodeOptional.get(), dstPsPath);
-        }
-        mergeOpManagedByAttributes(srcPsAugmenatation, dstPsAugmentationBuilder, dstGlobalPath);
 
-        dstPsNodeBuilder.addAugmentation(PhysicalSwitchAugmentation.class, dstPsAugmentationBuilder.build());
-        Node dstPsNode = dstPsNodeBuilder.build();
-        tx.merge(logicalDatastoreType, dstPsPath, dstPsNode, true);
+            if (dstPsNodeBuilder.getTerminationPoint() != null) {
+                dstPsNodeBuilder.getTerminationPoint().values().forEach(terminationPoint -> {
+                    InstanceIdentifier<TerminationPoint> terminationPointPath =
+                        dstPsPath.child(TerminationPoint.class, terminationPoint.key());
+                    tx.put(terminationPointPath, terminationPoint);
+                    LOG.trace("Destination PS Node: {} updated with termination-point : {}",
+                        dstNodeName, terminationPoint.key());
+                });
+            }
+        }
         LOG.debug("Copied {} physical switch node from {} to {}", logicalDatastoreType, srcPsPath, dstPsPath);
     }
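
The config branch in the last hunk replaces the old single whole-node tx.merge(..., true) with several smaller writes, as its block comment describes: probe with tx.exists(), write either just the augmentation subtree or the full node, then write each termination point individually. A condensed sketch of that shape, assuming the same typed-transaction API (builder and variable names are placeholders):

    boolean entryExists = tx.exists(dstPsPath).get();
    if (entryExists) {
        // node already in config-topo: replace only the augmentation subtree
        tx.put(dstPsPath.augmentation(PhysicalSwitchAugmentation.class), psAug);
    } else {
        // first write: skeletal node plus augmentation in one small put
        tx.put(dstPsPath, nodeBuilder.addAugmentation(psAug).build());
    }
    // each termination point then goes in as its own small put
    if (nodeBuilder.getTerminationPoint() != null) {
        for (TerminationPoint tp : nodeBuilder.getTerminationPoint().values()) {
            tx.put(dstPsPath.child(TerminationPoint.class, tp.key()), tp);
        }
    }

Splitting the writes this way keeps each individual datastore update small, at the cost of more operations per copied physical switch.
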