X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=renderers%2Fofoverlay%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fgroupbasedpolicy%2Frenderer%2Fofoverlay%2FOfWriter.java;h=4eb99dbfe96d4b0471fe5c3390b08b102aa462f4;hb=41c7620f8b7f2d5bb6ee18ad4e274a5b1d25ad6d;hp=ecbde4453f59b1176d8c0568bd0e3f25c58d843f;hpb=5cf357d93d0ecab7b5746e2f4e234472b6e5ce6a;p=groupbasedpolicy.git

diff --git a/renderers/ofoverlay/src/main/java/org/opendaylight/groupbasedpolicy/renderer/ofoverlay/OfWriter.java b/renderers/ofoverlay/src/main/java/org/opendaylight/groupbasedpolicy/renderer/ofoverlay/OfWriter.java
index ecbde4453..4eb99dbfe 100755
--- a/renderers/ofoverlay/src/main/java/org/opendaylight/groupbasedpolicy/renderer/ofoverlay/OfWriter.java
+++ b/renderers/ofoverlay/src/main/java/org/opendaylight/groupbasedpolicy/renderer/ofoverlay/OfWriter.java
@@ -13,6 +13,7 @@ import static org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow.FlowUtil
 
 import javax.annotation.Nullable;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -29,6 +30,8 @@ import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.MoreExecutors;
+
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
 import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
@@ -39,6 +42,8 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.Fl
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupTypes;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.BucketsBuilder;
@@ -167,6 +172,9 @@ public class OfWriter {
         Preconditions.checkNotNull(flow);
         Preconditions.checkNotNull(nodeId);
 
+        if (flow.getMatch() == null) {
+            flow = new FlowBuilder(flow).setMatch(new MatchBuilder().build()).build();
+        }
         TableBuilder tableBuilder = this.getTableBuilderForNode(nodeId, tableId);
         // transforming List to Set (with customized equals/hashCode) to eliminate duplicate entries
         List<Flow> flows = tableBuilder.getFlow();
@@ -182,7 +190,18 @@
         }
     }
 
-    public void commitToDataStore(DataBroker dataBroker) {
+    /**
+     * Update groups and flows on every node
+     * Only flows created by gbp - which are present in actualFlowMap - can be removed. It ensures no other flows
+     * are deleted
+     * Newly created flows are returned and will be used as actual in next update
+     *
+     * @param actualFlowMap map of flows which are currently present on all nodes
+     * @return map of newly created flows. These flows will be "actual" in next update
+     */
+    public Map<InstanceIdentifier<Table>, TableBuilder> commitToDataStore(DataBroker dataBroker,
+            Map<InstanceIdentifier<Table>, TableBuilder> actualFlowMap) {
+        Map<InstanceIdentifier<Table>, TableBuilder> actualFlows = new HashMap<>();
         if (dataBroker != null) {
 
             for (NodeId nodeId : groupIdsByNode.keySet()) {
@@ -193,47 +212,51 @@
                 }
             }
 
-            for (Map.Entry<InstanceIdentifier<Table>, TableBuilder> entry : flowMap.entrySet()) {
+            for (Map.Entry<InstanceIdentifier<Table>, TableBuilder> newEntry : flowMap.entrySet()) {
 
                 try {
-                    /*
-                     * Get the currently configured flows for
-                     * this table.
-                     */
-                    updateFlowTable(dataBroker, entry);
+                    // Get actual flows on the same node/table
+                    Map.Entry<InstanceIdentifier<Table>, TableBuilder> actualEntry = null;
+                    for (Map.Entry<InstanceIdentifier<Table>, TableBuilder> a : actualFlowMap.entrySet()) {
+                        if (a.getKey().equals(newEntry.getKey())) {
+                            actualEntry = a;
+                        }
+                    }
+                    // Get the currently configured flows for this table
+                    updateFlowTable(dataBroker, newEntry, actualEntry);
+                    actualFlows.put(newEntry.getKey(), newEntry.getValue());
                 } catch (Exception e) {
-                    LOG.warn("Couldn't read flow table {}", entry.getKey());
+                    LOG.warn("Couldn't read flow table {}", newEntry.getKey());
                 }
             }
         }
+        return actualFlows;
     }
 
-    private void updateFlowTable(DataBroker dataBroker,
-                                 Map.Entry<InstanceIdentifier<Table>, TableBuilder> entry)
+    private void updateFlowTable(DataBroker dataBroker, Map.Entry<InstanceIdentifier<Table>, TableBuilder> desiredFlowMap,
+                                 Map.Entry<InstanceIdentifier<Table>, TableBuilder> actualFlowMap)
             throws ExecutionException, InterruptedException {
-        // flows to update
-        Set<Flow> update = new HashSet<>(entry.getValue().getFlow());
-        // flows currently in the table
-        Set<Flow> curr = new HashSet<>();
-        final InstanceIdentifier<Table> tableIid = entry.getKey();
-        ReadWriteTransaction t = dataBroker.newReadWriteTransaction();
-        Optional<Table> r = t.read(LogicalDatastoreType.CONFIGURATION, tableIid).get();
-
-        if (r.isPresent()) {
-            Table currentTable = r.get();
-            curr = new HashSet<>(currentTable.getFlow());
+        // Actual state
+        List<Flow> actualFlows = new ArrayList<>();
+        if (actualFlowMap != null && actualFlowMap.getValue() != null) {
+            actualFlows = actualFlowMap.getValue().getFlow();
         }
+        // New state
+        List<Flow> desiredFlows = new ArrayList<>(desiredFlowMap.getValue().getFlow());
 
         // Sets with custom equivalence rules
-        Set<Equivalence.Wrapper<Flow>> oldFlows = new HashSet<>(
-                Collections2.transform(curr, EquivalenceFabric.FLOW_WRAPPER_FUNCTION));
-        Set<Equivalence.Wrapper<Flow>> updatedFlows = new HashSet<>(
-                Collections2.transform(update, EquivalenceFabric.FLOW_WRAPPER_FUNCTION));
+        Set<Equivalence.Wrapper<Flow>> wrappedActualFlows = new HashSet<>(
+                Collections2.transform(actualFlows, EquivalenceFabric.FLOW_WRAPPER_FUNCTION));
+        Set<Equivalence.Wrapper<Flow>> wrappedDesiredFlows = new HashSet<>(
+                Collections2.transform(desiredFlows, EquivalenceFabric.FLOW_WRAPPER_FUNCTION));
 
-        // what is still there but was not updated, needs to be deleted
-        Sets.SetView<Equivalence.Wrapper<Flow>> deletions = Sets.difference(oldFlows, updatedFlows);
-        // new flows (they were not there before)
-        Sets.SetView<Equivalence.Wrapper<Flow>> additions = Sets.difference(updatedFlows, oldFlows);
+        // All gbp flows which are not updated will be removed
+        Sets.SetView<Equivalence.Wrapper<Flow>> deletions = Sets.difference(wrappedActualFlows, wrappedDesiredFlows);
+        // New flows (they were not there before)
+        Sets.SetView<Equivalence.Wrapper<Flow>> additions = Sets.difference(wrappedDesiredFlows, wrappedActualFlows);
+
+        final InstanceIdentifier<Table> tableIid = desiredFlowMap.getKey();
+        ReadWriteTransaction t = dataBroker.newReadWriteTransaction();
 
         if (!deletions.isEmpty()) {
             for (Equivalence.Wrapper<Flow> wf : deletions) {
@@ -248,6 +271,9 @@
             for (Equivalence.Wrapper<Flow> wf : additions) {
                 Flow f = wf.get();
                 if (f != null) {
+                    if (f.getMatch() == null) {
+                        f = new FlowBuilder(f).setMatch(new MatchBuilder().build()).build();
+                    }
                     t.put(LogicalDatastoreType.CONFIGURATION,
                             FlowUtils.createFlowPath(tableIid, f.getId()), f, true);
                 }
@@ -265,7 +291,7 @@
             public void onSuccess(Void result) {
                 LOG.debug("Flow table {} updated.", tableIid);
             }
-        });
+        }, MoreExecutors.directExecutor());
     }
 
     private void updateGroups(DataBroker dataBroker, final NodeId nodeId)
@@ -343,7 +369,7 @@
             public void onSuccess(Void result) {
                 LOG.debug("Group table on node {} updated.", nodeId);
             }
-        });
+        }, MoreExecutors.directExecutor());
     }
 
 }
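
Note (not part of the patch): with this change, commitToDataStore() no longer reads the previous state back from the CONFIGURATION datastore; the caller is expected to keep the map returned by the last call and pass it in again as actualFlowMap, so only flows written by the groupbasedpolicy renderer are ever candidates for deletion. The sketch below illustrates that contract under stated assumptions: the class FlowCommitCycleExample, its method, and the field previousFlows are hypothetical and only rely on the OfWriter API shown in this diff plus the MD-SAL DataBroker.

    import java.util.HashMap;
    import java.util.Map;

    import org.opendaylight.controller.md.sal.binding.api.DataBroker;
    import org.opendaylight.groupbasedpolicy.renderer.ofoverlay.OfWriter;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
    import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

    // Hypothetical caller: keeps the map returned by commitToDataStore() and feeds it
    // back in on the next update so that only renderer-created flows can be deleted.
    public class FlowCommitCycleExample {

        // Flows written in the previous cycle; empty on the very first run, so nothing
        // is deleted until the renderer has written (and recorded) its own flows.
        private Map<InstanceIdentifier<Table>, TableBuilder> previousFlows = new HashMap<>();

        public void commit(DataBroker dataBroker, OfWriter ofWriter) {
            // ... OfWriter write calls building the desired flow/group state go here ...

            // Push the desired state and remember it as the "actual" state for next time.
            previousFlows = ofWriter.commitToDataStore(dataBroker, previousFlows);
        }
    }

On the MoreExecutors.directExecutor() additions: this is the standard Guava argument for the executor-taking Futures.addCallback() overload; it runs these log-only callbacks on the thread that completes the future, which is presumably why no dedicated executor is introduced here.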