package org.opendaylight.groupbasedpolicy.renderer.ofoverlay;
+import static org.opendaylight.groupbasedpolicy.util.DataStoreHelper.readFromDs;
+
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.groupbasedpolicy.endpoint.EpRendererAugmentation;
import org.opendaylight.groupbasedpolicy.renderer.ofoverlay.node.SwitchManager;
import org.opendaylight.groupbasedpolicy.resolver.EgKey;
+import org.opendaylight.groupbasedpolicy.resolver.IndexedTenant;
import org.opendaylight.groupbasedpolicy.util.DataStoreHelper;
import org.opendaylight.groupbasedpolicy.util.IidFactory;
import org.opendaylight.groupbasedpolicy.util.SetUtils;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.common.rev140421.ConditionName;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.common.rev140421.EndpointGroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.common.rev140421.L2BridgeDomainId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.common.rev140421.TenantId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.Endpoints;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.RegisterEndpointInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.RegisterL3PrefixEndpointInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoint.fields.L3Address;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoint.fields.L3AddressBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoint.fields.L3AddressKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.Endpoint;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointL3;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointL3Builder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointL3Prefix;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointL3PrefixBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.EndpointLocation.LocationType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.OfOverlayConfig.LearningMode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.OfOverlayContext;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.OfOverlayContextBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.OfOverlayContextInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.OfOverlayL3Context;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.ofoverlay.rev140528.OfOverlayL3ContextBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.policy.rev140421.tenants.Tenant;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.policy.rev140421.tenants.tenant.L2BridgeDomain;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.policy.rev140421.tenants.tenant.L3Context;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
import com.google.common.base.Function;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
* In order to render the policy, we need to be able to efficiently enumerate
* all endpoints on a particular switch and also all the switches containing
* each particular endpoint group
-
*/
public class EndpointManager implements AutoCloseable, DataChangeListener {
private static final Logger LOG = LoggerFactory.getLogger(EndpointManager.class);
private final static InstanceIdentifier<Nodes> nodesIid = InstanceIdentifier.builder(Nodes.class).build();
- private static final InstanceIdentifier<Endpoint> endpointsIid = InstanceIdentifier.builder(Endpoints.class)
- .child(Endpoint.class)
- .build();
+
final ListenerRegistration<DataChangeListener> listenerReg;
private final ConcurrentHashMap<EpKey, Endpoint> endpoints = new ConcurrentHashMap<>();
+ private final ConcurrentHashMap<EpKey, Endpoint> externalEndpointsWithoutLocation = new ConcurrentHashMap<>();
private final ConcurrentHashMap<NodeId, ConcurrentMap<EgKey, Set<EpKey>>> endpointsByGroupByNode = new ConcurrentHashMap<>();
private final ConcurrentHashMap<NodeId, Set<EpKey>> endpointsByNode = new ConcurrentHashMap<>();
this.dataProvider = dataProvider;
EndpointRpcRegistry.register(dataProvider, rpcRegistry, endpointRpcAug);
if (dataProvider != null) {
- listenerReg = dataProvider.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL, endpointsIid, this,
- DataChangeScope.ONE);
+ listenerReg = dataProvider.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ IidFactory.endpointsIid(), this, DataChangeScope.SUBTREE);
} else
listenerReg = null;
if (ebn == null)
return Collections.emptyList();
return ImmutableList.copyOf(Collections2.transform(ebn, indexTransform));
+ }
    /**
     * Snapshot of all known external endpoints that carry no location information.
     *
     * @return immutable copy of the current values; never null
     */
    public synchronized Collection<Endpoint> getExternalEndpointsWithoutLoc() {
        return ImmutableList.copyOf(externalEndpointsWithoutLocation.values());
    }

    /**
     * Look up a single external, location-less endpoint by its key.
     *
     * @param epKey the (L2 context, MAC) key
     * @return the endpoint, or null when no such external endpoint is tracked
     */
    public synchronized Endpoint getExternalEndpointWithoutLoc(EpKey epKey) {
        return externalEndpointsWithoutLocation.get(epKey);
    }
/**
* Get the endpoint object for the given key
*
* XXX: alagalah I wanted to avoid adding another Map. Due to not being able to
* get to the granularity of the L3PrefixEndpoint List within the Endpoints container
* in the datastore, we have to pull all the Endpoints. If this causes performance issues
- * we may have to revisit a Map in updateEndpoint but note, this Endpoint doesn't have a location
+ * we may have to revisit a Map in updateEndpoint but note, this Endpoint doesn't have a
+ * location
* and hence we would have to process it outside the null location check.
*/
/*
- * XXX: alagalah See how much nicer it would have been if we could have just called this L3PrefixEndpoint and
- * refactored Endpoints to be L2Endpoint, L3Endpoint, L3PrefixEndpoint? Plurality reads sanely with
- * that but not with this. Naming is important, and backwards compatibility sometimes isn't worth it.
+ * XXX: alagalah See how much nicer it would have been if we could have just called this
+ * L3PrefixEndpoint and
+ * refactored Endpoints to be L2Endpoint, L3Endpoint, L3PrefixEndpoint? Plurality reads
+ * sanely with
+ * that but not with this. Naming is important, and backwards compatibility sometimes isn't
+ * worth it.
*/
if (dataProvider == null) {
LOG.error("Null DataProvider in EndpointManager getEndpointsL3Prefix");
LOG.warn("No Endpoints present in datastore.");
return null;
}
- if (endpoints.get().getEndpointL3Prefix()==null) {
+ if (endpoints.get().getEndpointL3Prefix() == null) {
LOG.warn("No L3 Prefix Endpoints present in datastore.");
return null;
}
return endpoints.get().getEndpointL3Prefix();
}
+
/**
* Set the learning mode to the specified value
*
return ImmutableList.copyOf(Collections2.transform(ebg, indexTransform));
}
+ public synchronized Collection<Endpoint> getExtEpsNoLocForGroup(final EgKey eg) {
+
+ return ImmutableSet.copyOf(Collections2.filter(externalEndpointsWithoutLocation.values(), new Predicate<Endpoint>() {
+
+ @Override
+ public boolean apply(Endpoint input) {
+ Set<EndpointGroupId> epgIds = new HashSet<>();
+ if (input.getEndpointGroup() != null) {
+ epgIds.add(input.getEndpointGroup());
+ }
+ if (input.getEndpointGroups() != null) {
+ epgIds.addAll(input.getEndpointGroups());
+ }
+ if (epgIds.isEmpty()) {
+ LOG.error("No EPGs for {}. This is not a valid Endpoint.",input.getKey());
+ return false;
+ }
+ return (epgIds.contains(eg.getEgId()));
+ }
+
+ }));
+ }
/**
* Get the effective list of conditions that apply to a particular endpoint.
* This could include additional conditions over the condition labels
if (dao instanceof Endpoint) {
updateEndpoint(null, (Endpoint) dao);
} else if (dao instanceof EndpointL3) {
- updateEndpointL3(null,(EndpointL3)dao);
+ updateEndpointL3(null, (EndpointL3) dao);
} else if (dao instanceof EndpointL3Prefix) {
-
+ continue;
}
}
for (InstanceIdentifier<?> iid : change.getRemovedPaths()) {
if (old == null) {
continue;
}
- if(old instanceof Endpoint) {
+ if (old instanceof Endpoint) {
updateEndpoint((Endpoint) old, null);
} else if (old instanceof EndpointL3) {
- continue;
+ updateEndpointL3((EndpointL3) old, null);
} else if (old instanceof EndpointL3Prefix) {
continue;
}
Map<InstanceIdentifier<?>, DataObject> dao = change.getUpdatedData();
for (Entry<InstanceIdentifier<?>, DataObject> entry : dao.entrySet()) {
if (entry.getValue() instanceof Endpoint) {
- DataObject old = change.getOriginalData().get(entry.getKey());
- Endpoint oldEp = null;
- if (old != null && old instanceof Endpoint)
- oldEp = (Endpoint) old;
+ Endpoint oldEp = (Endpoint) change.getOriginalData().get(entry.getKey());
updateEndpoint(oldEp, (Endpoint) entry.getValue());
} else if (entry.getValue() instanceof EndpointL3) {
- continue;
+ EndpointL3 oldEp3 = (EndpointL3)change.getOriginalData().get(entry.getKey());
+ updateEndpointL3(oldEp3, (EndpointL3) entry.getValue());
} else if (entry.getValue() instanceof EndpointL3Prefix) {
continue;
}
}
};
- private boolean validEp(Endpoint endpoint) {
+ private boolean isValidEp(Endpoint endpoint) {
return (endpoint != null && endpoint.getTenant() != null
&& (endpoint.getEndpointGroup() != null || endpoint.getEndpointGroups() != null)
&& endpoint.getL2Context() != null && endpoint.getMacAddress() != null);
}
+ private boolean isValidL3Ep(EndpointL3 endpoint) {
+ return (endpoint != null && endpoint.getTenant() != null
+ && (endpoint.getEndpointGroup() != null || endpoint.getEndpointGroups() != null)
+ && endpoint.getL3Context() != null && endpoint.getIpAddress() != null);
+ }
+
private NodeId getLocation(Endpoint endpoint) {
- if (!validEp(endpoint))
+ if (!isValidEp(endpoint))
return null;
OfOverlayContext context = endpoint.getAugmentation(OfOverlayContext.class);
if (context != null)
}
private EpKey getEpKey(Endpoint endpoint) {
- if (!validEp(endpoint))
+ if (!isValidEp(endpoint))
return null;
return new EpKey(endpoint.getL2Context(), endpoint.getMacAddress());
}
public EgKey getEgKey(Endpoint endpoint) {
- if (!validEp(endpoint))
+ if (!isValidEp(endpoint))
return null;
return new EgKey(endpoint.getTenant(), endpoint.getEndpointGroup());
}
/**
* Update the endpointL3 indexes. Set newEp to null to remove.
*/
- protected synchronized void updateEndpointL3(EndpointL3 oldEp, EndpointL3 newEp) {
+ protected synchronized void updateEndpointL3(EndpointL3 oldL3Ep, EndpointL3 newL3Ep) {
+
+ // if (oldEp == null && newEp != null ) {
+ if (newL3Ep != null) {
+ // new L3Endpoint
+ LOG.trace("Processing L3Endpoint");
+ if (isValidL3Ep(newL3Ep)) {
+ if (newL3Ep.getMacAddress() == null
+ && getLocationType(newL3Ep) != null
+ && getLocationType(newL3Ep).equals(LocationType.External)) {
+ if (newL3Ep.getNetworkContainment() != null) {
+ EndpointL3 l3Ep = updateEndpointL3MacAddress(newL3Ep);
+ ReadWriteTransaction rwTx = dataProvider.newReadWriteTransaction();
+ Endpoint newL2Ep = addEndpointFromL3Endpoint(l3Ep, rwTx);
+ l3Ep=updateL3EndpointL2Context(l3Ep, newL2Ep.getL2Context());
+ if (l3Ep == null) {
+ return;
+ }
+ rwTx.put(LogicalDatastoreType.OPERATIONAL,
+ IidFactory.endpointL3Iid(l3Ep.getL3Context(), l3Ep.getIpAddress()), l3Ep, true);
+ DataStoreHelper.submitToDs(rwTx);
+ return;
+ } else {
+ LOG.error("Cannot generate MacAddress for L3Endpoint {}. NetworkContainment is null.", newL3Ep);
+ return;
+ }
+ }
+ } else {
+ LOG.error("{} is not a valid L3 Endpoint", newL3Ep);
+ return;
+ }
+ return;
+ }
+ if (oldL3Ep != null && newL3Ep == null) {
+ // deleted L3Endpoint
+ return;
+ }
+
+ if (oldL3Ep != null && newL3Ep != null) {
+ LOG.trace("Updating L3 Endpoint {}");
+ // updated Endpoint
+ return;
+ }
+ if (newL3Ep.getAugmentation(OfOverlayL3Context.class) == null) {
+ LOG.info("L3Endpoint updatbut no augmentation information");
+ return;
+ }
}
+
+ private EndpointL3 updateL3EndpointL2Context(EndpointL3 l3Ep, L2BridgeDomainId l2BdId) {
+
+ if(l3Ep == null || l2BdId == null) {
+ LOG.warn("L3Endpoint {} or L2BridgeDomain {} was null in updateEndpointL3L2Context.",l3Ep,l2BdId);
+ return null;
+ }
+ return new EndpointL3Builder(l3Ep)
+ .addAugmentation(OfOverlayL3Context.class, l3Ep.getAugmentation(OfOverlayL3Context.class))
+ .setL2Context(l2BdId)
+ .build();
+
+ }
+
+ private LocationType getLocationType(EndpointL3 epL3) {
+ if (epL3 == null
+ || epL3.getAugmentation(OfOverlayL3Context.class) == null
+ || epL3.getAugmentation(OfOverlayL3Context.class).getLocationType() == null ) {
+ return null;
+ }
+ return epL3.getAugmentation(OfOverlayL3Context.class).getLocationType();
+ }
+
+ private Endpoint addEndpointFromL3Endpoint(EndpointL3 l3Ep, ReadWriteTransaction rwTx) {
+ // Make an indexed tenant and resolveL2Bridgedomain from L3EP containment if not L3
+ // (instanceof)
+ OfOverlayL3Context ofL3Ctx = l3Ep.getAugmentation(OfOverlayL3Context.class);
+ OfOverlayContext ofCtx = getOfOverlayContextFromL3Endpoint(ofL3Ctx);
+ if (l3Ep.getNetworkContainment() instanceof L3Context) {
+ LOG.error("Cannot generate Endpoint from EndpointL3, network containment is L3Context.");
+ rwTx.cancel();
+ return null;
+ }
+
+ IndexedTenant indexedTenant;
+ Optional<Tenant> tenant = readFromDs(LogicalDatastoreType.CONFIGURATION, IidFactory.tenantIid(l3Ep.getTenant()),
+ rwTx);
+ if (tenant.isPresent()) {
+ indexedTenant = new IndexedTenant(tenant.get());
+ } else {
+ LOG.error("Could not find tenant {} for EndpointL3 {}", l3Ep.getTenant(), l3Ep);
+ rwTx.cancel();
+ return null;
+ }
+ List<L3Address> l3Address = new ArrayList<>();
+ l3Address.add(new L3AddressBuilder()
+ .setIpAddress(l3Ep.getIpAddress())
+ .setL3Context(l3Ep.getL3Context())
+ .setKey(new L3AddressKey(l3Ep.getIpAddress(),l3Ep.getL3Context()))
+ .build());
+ L2BridgeDomain l2Bd = indexedTenant.resolveL2BridgeDomain(l3Ep.getNetworkContainment());
+ Endpoint ep = new EndpointBuilder().setKey(new EndpointKey(l2Bd.getId(), l3Ep.getMacAddress()))
+ .setMacAddress(l3Ep.getMacAddress())
+ .setL2Context(l2Bd.getId())
+ .setEndpointGroups(l3Ep.getEndpointGroups())
+ .setTenant(l3Ep.getTenant())
+ .setL3Address(l3Address)
+ .setCondition(l3Ep.getCondition())
+ .setNetworkContainment(l3Ep.getNetworkContainment())
+ .addAugmentation(OfOverlayContext.class, ofCtx)
+ .build();
+ rwTx.put(LogicalDatastoreType.OPERATIONAL, IidFactory.endpointIid(ep.getL2Context(), ep.getMacAddress()), ep);
+ return ep;
+ }
+
+ private OfOverlayContext getOfOverlayContextFromL3Endpoint(OfOverlayL3Context ofL3Ctx) {
+ OfOverlayContextBuilder ofBuilder = new OfOverlayContextBuilder();
+ if (ofL3Ctx.getInterfaceId() != null) {
+ ofBuilder.setInterfaceId(ofL3Ctx.getInterfaceId());
+ }
+ if (ofL3Ctx.getLocationType() != null) {
+ ofBuilder.setLocationType(ofL3Ctx.getLocationType());
+ }
+ if (ofL3Ctx.getNodeConnectorId() != null) {
+ ofBuilder.setNodeConnectorId(ofL3Ctx.getNodeConnectorId());
+ }
+ if (ofL3Ctx.getNodeId() != null) {
+ ofBuilder.setNodeId(ofL3Ctx.getNodeId());
+ }
+ if (ofL3Ctx.getPortName() != null) {
+ ofBuilder.setPortName(ofL3Ctx.getPortName());
+ }
+
+ return ofBuilder.build();
+ }
+
+ private EndpointL3 updateEndpointL3MacAddress(EndpointL3 endpointL3) {
+
+ EndpointL3Builder epL3Builder = new EndpointL3Builder(endpointL3).addAugmentation(OfOverlayL3Context.class,
+ endpointL3.getAugmentation(OfOverlayL3Context.class))
+ .setMacAddress(getMacAddress(endpointL3))
+ .setTimestamp(System.currentTimeMillis());
+
+ return epL3Builder.build();
+ }
+
    /**
     * MAC address to assign to a generated external L2 endpoint.
     *
     * NOTE(review): the endpointL3 parameter is currently unused — every
     * external endpoint receives the single configured external MAC from
     * PolicyManager. Confirm this is intentional before deriving per-endpoint
     * addresses.
     */
    private MacAddress getMacAddress(EndpointL3 endpointL3) {
        return PolicyManager.getExternaMacAddress();
    }
+
/**
* Update the endpoint indexes. Set newEp to null to remove.
*/
boolean notifyOldEg = false;
boolean notifyNewEg = false;
- // When newLoc and oldLoc are null there is nothing to do
+ /*
+ * When newLoc and oldLoc are null for Internal ports there is nothing to do
+ */
if (newLoc == null && oldLoc == null) {
- return;
+ if ((oldEp != null && isInternal(oldEp))
+ || (newEp !=null && isInternal(newEp))) {
+ return;
+ } else {
+ // Maintain "external endpoints" map
+ if (newEp != null) {
+ externalEndpointsWithoutLocation.put(newEpKey, newEp);
+ } else {
+ externalEndpointsWithoutLocation.remove(oldEpKey);
+ }
+ return; // No more processing for Externals.
+ }
}
Set<EndpointGroupId> newEpgIds = new HashSet<EndpointGroupId>();
/*
* New endpoint with location information
*/
- if (oldEp == null && newEp != null && newLoc != null) {
+ if (oldEp == null && newEp != null && newLoc != null ) {
// Update endpointsByNode
if (endpointsByNode.get(newLoc) == null) {
// TODO: alagalah cleaner way with checking epsNode
}
}
+ private boolean isExternal(Endpoint ep) {
+ return !isInternal(ep);
+ }
+
+ public static boolean isInternal(Endpoint ep) {
+ Preconditions.checkNotNull(ep);
+ OfOverlayContext ofc = ep.getAugmentation(OfOverlayContext.class);
+ if (ofc == null) return true; // Default is internal
+ if (ofc.getLocationType() == null || ofc.getLocationType().equals(LocationType.Internal)) return true; // Default is internal
+ return false;
+ }
+
// A wrapper class around node, nodeConnector info so we can pass a final
// object inside OnSuccess anonymous inner class
private static class NodeInfo {
package org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow;
+import static com.google.common.base.Preconditions.checkNotNull;
import static org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow.FlowUtils.ARP;
import static org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow.FlowUtils.IPv4;
import static org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow.FlowUtils.IPv6;
import static org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow.FlowUtils.outputAction;
import static org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow.FlowUtils.setDlDstAction;
import static org.opendaylight.groupbasedpolicy.renderer.ofoverlay.flow.FlowUtils.setDlSrcAction;
+import static org.opendaylight.groupbasedpolicy.util.DataStoreHelper.readFromDs;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Set;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.binding.api.ReadTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.groupbasedpolicy.endpoint.EpKey;
import org.opendaylight.groupbasedpolicy.renderer.ofoverlay.OfContext;
import org.opendaylight.groupbasedpolicy.renderer.ofoverlay.PolicyManager.FlowMap;
import org.opendaylight.groupbasedpolicy.resolver.EgKey;
import org.opendaylight.groupbasedpolicy.resolver.PolicyInfo;
import org.opendaylight.groupbasedpolicy.resolver.TenantUtils;
+import org.opendaylight.groupbasedpolicy.util.IidFactory;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Prefix;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv6Prefix;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.common.rev140421.TenantId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.Endpoints;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoint.fields.L3Address;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoint.l3.prefix.fields.EndpointL3Gateways;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.Endpoint;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointL3;
import org.opendaylight.yang.gen.v1.urn.opendaylight.groupbasedpolicy.endpoint.rev140421.endpoints.EndpointL3Key;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowjava.nx.match.rev140421.NxmNxReg6;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowjava.nx.match.rev140421.NxmNxReg7;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.overlay.rev150105.TunnelTypeVxlan;
-import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
+import com.google.common.base.Strings;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.CheckedFuture;
/**
* Manage the table that maps the destination address to the next hop for the
*/
public static final MacAddress ROUTER_MAC = new MacAddress("88:f0:31:b5:12:b5");
public static final MacAddress MULTICAST_MAC = new MacAddress("01:00:00:00:00:00");
+ public static final Integer BASE_L3_PRIORITY = 100;
    /**
     * @param ctx shared renderer context used by all flow tables
     * @param tableId OpenFlow table id this mapper writes its flows into
     */
    public DestinationMapper(OfContext ctx, short tableId) {
        super(ctx);
        this.TABLE_ID = tableId;
    }
Map<TenantId, HashSet<Subnet>> subnetsByTenant = new HashMap<TenantId, HashSet<Subnet>>();
@Override
public void sync(NodeId nodeId, PolicyInfo policyInfo, FlowMap flowMap) throws Exception {
- TenantId currentTenant=null;
+ TenantId currentTenant = null;
flowMap.writeFlow(nodeId, TABLE_ID, dropFlow(Integer.valueOf(1), null));
Collection<EndpointL3Prefix> prefixEps = ctx.getEndpointManager().getEndpointsL3PrefixForTenant(currentTenant);
if (prefixEps != null) {
LOG.trace("DestinationMapper - Processing L3PrefixEndpoints");
+ for (EndpointL3Prefix prefixEp : prefixEps) {
+ Flow prefixFlow = createL3PrefixFlow(prefixEp, policyInfo, nodeId);
+ if (prefixFlow != null) {
+ flowMap.writeFlow(nodeId, TABLE_ID, prefixFlow);
+ LOG.trace("Wrote L3Prefix flow");
+ }
+ }
}
}
// set up next-hop destinations for all the endpoints in the endpoint
// group on the node
+ private Flow createL3PrefixFlow(EndpointL3Prefix prefixEp, PolicyInfo policyInfo, NodeId nodeId) throws Exception {
+ /*
+ * Priority: 100+lengthprefix
+ * Match: prefix, l3c, "mac address of router" ?
+ * Action:
+ * - set Reg2, Reg3 for L3Ep by L2Ep ?
+ * - if external,
+ * - Reg7: use switch location external port else punt for now
+ * - if internal
+ * - Reg7: grab L2Ep from L3Ep and use its location info
+ * - goto_table: POLENF (will check there for external on EP)
+ */
+
+ ReadOnlyTransaction rTx = ctx.getDataBroker().newReadOnlyTransaction();
+ // TODO Bug #3440 Target: Be - should support for more than first gateway.
+ EndpointL3Gateways l3Gateway = prefixEp.getEndpointL3Gateways().get(0);
+ Optional<EndpointL3> optL3Ep = readFromDs(LogicalDatastoreType.OPERATIONAL,
+ IidFactory.endpointL3Iid(l3Gateway.getL3Context(), l3Gateway.getIpAddress()), rTx);
+ if (!optL3Ep.isPresent()) {
+ LOG.error("createL3PrefixFlow - L3Endpoint gateway {} for L3Prefix {} not found.", l3Gateway, prefixEp);
+ return null;
+ }
+ EndpointL3 l3Ep = optL3Ep.get();
+ Optional<Endpoint> optL2Ep = readFromDs(LogicalDatastoreType.OPERATIONAL,
+ IidFactory.endpointIid(l3Ep.getL2Context(), l3Ep.getMacAddress()), rTx);
+ if (!optL2Ep.isPresent()) {
+ LOG.error("createL3PrefixFlow - L2Endpoint for L3Gateway {} not found.", l3Ep);
+ return null;
+ }
+ Endpoint l2Ep = optL2Ep.get();
+ EndpointFwdCtxOrdinals epFwdCtxOrds = OrdinalFactory.getEndpointFwdCtxOrdinals(ctx, policyInfo, l2Ep);
+
+ NetworkDomainId epNetworkContainment = getEPNetworkContainment(l2Ep);
+
+ MacAddress epDestMac = l2Ep.getMacAddress();
+ MacAddress destSubnetGatewayMac = l2Ep.getMacAddress();
+
+ ArrayList<Instruction> l3instructions = new ArrayList<>();
+ List<Action> applyActions = new ArrayList<>();
+ List<Action> l3ApplyActions = new ArrayList<>();
+
+ int order = 0;
+
+ Action setdEPG = nxLoadRegAction(NxmNxReg2.class, BigInteger.valueOf(epFwdCtxOrds.getEpgId()));
+ Action setdCG = nxLoadRegAction(NxmNxReg3.class, BigInteger.valueOf(epFwdCtxOrds.getCgId()));
+ Action setNextHop;
+ String nextHop=null;
+
+ OfOverlayContext ofc = l2Ep.getAugmentation(OfOverlayContext.class);
+ LocationType location;
+
+ if (ofc != null && ofc.getLocationType() != null) {
+ location = ofc.getLocationType();
+ } else if (ofc != null) {
+ // Augmentation, but using default location
+ location = LocationType.Internal;
+ } else {
+ LOG.info("createL3PrefixFlow - Endpoint {} had no augmentation.", l2Ep);
+ return null;
+ }
+
+ long portNum = -1;
+
+ if (location.equals(LocationType.Internal)) {
+ checkNotNull(ofc.getNodeConnectorId());
+ nextHop = ofc.getNodeConnectorId().getValue();
+ try {
+ portNum = getOfPortNum(ofc.getNodeConnectorId());
+ } catch (NumberFormatException ex) {
+ LOG.warn("Could not parse port number {}", ofc.getNodeConnectorId(), ex);
+ return null;
+ }
+
+ } else {
+ // External
+ Set<NodeConnectorId> externalPorts = ctx.getSwitchManager().getExternalPorts(nodeId);
+ checkNotNull(externalPorts);
+ for (NodeConnectorId externalPort : externalPorts) {
+ // TODO Bug #3440 Target: Be - should support for more than first external port.
+ nextHop = externalPort.getValue();
+ try {
+ portNum = getOfPortNum(externalPort);
+ } catch (NumberFormatException ex) {
+ LOG.warn("Could not parse port number {}", ofc.getNodeConnectorId(), ex);
+ return null;
+ }
+ continue;
+ }
+ }
+
+ if (Strings.isNullOrEmpty(nextHop)
+ || portNum == -1) {
+ LOG.error("createL3Prefix - Cannot find nodeConnectorId for {} for Prefix: ", l2Ep, prefixEp);
+ return null;
+ }
+ setNextHop = nxLoadRegAction(NxmNxReg7.class, BigInteger.valueOf(portNum));
+
+ Action setDlDst = setDlDstAction(epDestMac);
+ l3ApplyActions.add(setDlDst);
+
+ Action decTtl = decNwTtlAction();
+ l3ApplyActions.add(decTtl);
+
+ order += 1;
+ applyActions.add(setdEPG);
+ applyActions.add(setdCG);
+ applyActions.add(setNextHop);
+
+ applyActions.addAll(l3ApplyActions);
+ Instruction applyActionsIns = new InstructionBuilder().setOrder(order++)
+ .setInstruction(applyActionIns(applyActions.toArray(new Action[applyActions.size()])))
+ .build();
+
+ l3instructions.add(applyActionsIns);
+ Instruction gotoTable = new InstructionBuilder().setOrder(order++)
+ .setInstruction(gotoTableIns(ctx.getPolicyManager().getTABLEID_POLICY_ENFORCER()))
+ .build();
+ l3instructions.add(gotoTable);
+
+ Layer3Match m = null;
+ Long etherType = null;
+ String ikey = null;
+ Integer prefixLength=0;
+ if (prefixEp.getIpPrefix().getIpv4Prefix() != null) {
+ ikey = prefixEp.getIpPrefix().getIpv4Prefix().getValue();
+ etherType = IPv4;
+ prefixLength=Integer.valueOf(prefixEp.getIpPrefix().getIpv4Prefix().getValue().split("/")[1]);
+ m = new Ipv4MatchBuilder().setIpv4Destination(new Ipv4Prefix(ikey)).build();
+ } else if (prefixEp.getIpPrefix().getIpv6Prefix() != null) {
+ ikey = prefixEp.getIpPrefix().getIpv6Prefix().getValue();
+ etherType = IPv6;
+ /*
+ * This will result in flows with priority between 100-228, but since its matching on IPv6 prefix as well
+ * this shouldn't pose and issue, as the priority is more important within the address space of the matcher,
+ * even though technically flows are processed in priority order.
+ */
+
+ prefixLength=Integer.valueOf(prefixEp.getIpPrefix().getIpv6Prefix().getValue().split("/")[1]);
+ m = new Ipv6MatchBuilder().setIpv6Destination(new Ipv6Prefix(ikey)).build();
+ } else {
+ LOG.error("Endpoint has IPAddress that is not recognised as either IPv4 or IPv6.", prefixEp);
+ return null;
+ }
+
+ FlowId flowid = new FlowId(new StringBuilder().append(Integer.toString(epFwdCtxOrds.getL3Id()))
+ .append("|l3prefix|")
+ .append(ikey)
+ .append("|")
+ .append(destSubnetGatewayMac)
+ .append("|")
+ .append(nextHop)
+ .toString());
+ MatchBuilder mb = new MatchBuilder().setEthernetMatch(ethernetMatch(null, null, etherType));
+// MatchBuilder mb = new MatchBuilder();//.setLayer3Match(m);
+ addNxRegMatch(mb, RegMatch.of(NxmNxReg6.class, Long.valueOf(epFwdCtxOrds.getL3Id())));
+ FlowBuilder flowb = base().setId(flowid)
+ .setPriority(Integer.valueOf(BASE_L3_PRIORITY+prefixLength))
+ .setMatch(mb.build())
+ .setInstructions(new InstructionsBuilder().setInstruction(l3instructions).build());
+ return flowb.build();
+ }
+
private Flow createBroadcastFlow(EndpointFwdCtxOrdinals epOrd) {
FlowId flowId = new FlowId("broadcast|" + epOrd.getFdId());
- MatchBuilder mb = new MatchBuilder()
- .setEthernetMatch(new EthernetMatchBuilder()
- .setEthernetDestination(new EthernetDestinationBuilder().
- setAddress(MULTICAST_MAC)
- .setMask(MULTICAST_MAC).build())
- .build());
+ MatchBuilder mb = new MatchBuilder().setEthernetMatch(new EthernetMatchBuilder().setEthernetDestination(
+ new EthernetDestinationBuilder().setAddress(MULTICAST_MAC).setMask(MULTICAST_MAC).build()).build());
addNxRegMatch(mb, RegMatch.of(NxmNxReg5.class, Long.valueOf(epOrd.getFdId())));
FlowBuilder flowb = base().setPriority(Integer.valueOf(140))
Subnet destSubnet = null;
HashSet<Subnet> subnets = getSubnets(destEp.getTenant());
if (subnets == null) {
- LOG.trace("No subnets in tenant {}", destL3Address.getIpAddress());
+ LOG.trace("No subnets in tenant {}", destEp.getTenant());
return null;
}
NetworkDomainId epNetworkContainment = getEPNetworkContainment(destEp);
Subnet destSubnet = null;
HashSet<Subnet> subnets = getSubnets(destEp.getTenant());
if (subnets == null) {
- LOG.trace("No subnets in tenant {}", destL3Address.getIpAddress());
+ LOG.trace("No subnets in tenant {}", destEp.getTenant());
return null;
}
NetworkDomainId epNetworkContainment = getEPNetworkContainment(destEp);
return localSubnets;
}
- /**
- * Reads data from datastore as synchronous call.
- *
- * @return {@link Optional#isPresent()} is {@code true} if reading was
- * successful and data exists in datastore; {@link Optional#isPresent()} is
- * {@code false} otherwise
- */
- public static <T extends DataObject> Optional<T> readFromDs(LogicalDatastoreType store, InstanceIdentifier<T> path,
- ReadTransaction rTx) {
- CheckedFuture<Optional<T>, ReadFailedException> resultFuture = rTx.read(store, path);
- try {
- return resultFuture.checkedGet();
- } catch (ReadFailedException e) {
- LOG.warn("Read failed from DS.", e);
- return Optional.absent();
- }
- }
-
static byte[] bytesFromHexString(String values) {
String target = "";
if (values != null) {