/*
- * Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2015, 2017 Cisco Systems, Inc. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.lispflowmapping.implementation;
+import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
+import java.util.Date;
import java.util.EnumMap;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;
+import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.lispflowmapping.config.ConfigIni;
import org.opendaylight.lispflowmapping.dsbackend.DataStoreBackEnd;
-import org.opendaylight.lispflowmapping.implementation.config.ConfigIni;
+import org.opendaylight.lispflowmapping.implementation.timebucket.implementation.TimeBucketMappingTimeoutService;
+import org.opendaylight.lispflowmapping.implementation.timebucket.interfaces.ISouthBoundMappingTimeoutService;
import org.opendaylight.lispflowmapping.implementation.util.DSBEInputUtil;
+import org.opendaylight.lispflowmapping.implementation.util.MSNotificationInputUtil;
import org.opendaylight.lispflowmapping.implementation.util.MappingMergeUtil;
import org.opendaylight.lispflowmapping.interfaces.dao.ILispDAO;
+import org.opendaylight.lispflowmapping.interfaces.dao.SubKeys;
+import org.opendaylight.lispflowmapping.interfaces.dao.Subscriber;
+import org.opendaylight.lispflowmapping.interfaces.mapcache.IAuthKeyDb;
+import org.opendaylight.lispflowmapping.interfaces.mapcache.ILispMapCache;
import org.opendaylight.lispflowmapping.interfaces.mapcache.IMapCache;
import org.opendaylight.lispflowmapping.interfaces.mapcache.IMappingSystem;
import org.opendaylight.lispflowmapping.interfaces.mappingservice.IMappingService;
+import org.opendaylight.lispflowmapping.lisp.type.LispMessage;
+import org.opendaylight.lispflowmapping.lisp.type.MappingData;
import org.opendaylight.lispflowmapping.lisp.util.LispAddressStringifier;
import org.opendaylight.lispflowmapping.lisp.util.LispAddressUtil;
-import org.opendaylight.lispflowmapping.mapcache.FlatMapCache;
+import org.opendaylight.lispflowmapping.lisp.util.MaskUtil;
+import org.opendaylight.lispflowmapping.mapcache.AuthKeyDb;
import org.opendaylight.lispflowmapping.mapcache.MultiTableMapCache;
import org.opendaylight.lispflowmapping.mapcache.SimpleMapCache;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.lisp.address.types.rev151105.SimpleAddress;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.lisp.address.types.rev151105.lisp.address.address.Ipv6;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.lisp.address.types.rev151105.lisp.address.address.ServicePath;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.lisp.address.types.rev151105.lisp.address.address.explicit.locator.path.explicit.locator.path.Hop;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.SiteId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.inet.binary.types.rev160303.IpAddressBinary;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.binary.address.types.rev160504.Ipv4PrefixBinaryAfi;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.binary.address.types.rev160504.Ipv6PrefixBinaryAfi;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.binary.address.types.rev160504.augmented.lisp.address.address.Ipv4PrefixBinary;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.binary.address.types.rev160504.augmented.lisp.address.address.Ipv6PrefixBinary;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.XtrId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.eid.container.Eid;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.locatorrecords.LocatorRecord;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.locatorrecords.LocatorRecordBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.mapping.record.container.MappingRecord;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.mapping.record.container.MappingRecordBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.lisp.proto.rev151105.rloc.container.Rloc;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.mappingservice.rev150906.MappingChange;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.mappingservice.rev150906.MappingOrigin;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.mappingservice.rev150906.db.instance.AuthenticationKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.lfm.mappingservice.rev150906.db.instance.Mapping;
* mapping lookups.
*
* @author Florin Coras
+ * @author Lorand Jakab
*
*/
public class MappingSystem implements IMappingSystem {
private static final Logger LOG = LoggerFactory.getLogger(MappingSystem.class);
- private boolean iterateMask;
- private boolean notificationService;
- private boolean overwrite;
+ private static final String AUTH_KEY_TABLE = "authentication";
+ private static final int TTL_RLOC_TIMED_OUT = 1;
+ private static final int TTL_NO_RLOC_KNOWN = 15;
+ private NotificationPublishService notificationPublishService;
+ private boolean mappingMerge;
private ILispDAO dao;
- private IMapCache smc;
+ private ILispDAO sdao;
+ private ILispMapCache smc;
private IMapCache pmc;
+ private IAuthKeyDb akdb;
private final EnumMap<MappingOrigin, IMapCache> tableMap = new EnumMap<>(MappingOrigin.class);
private DataStoreBackEnd dsbe;
+ private boolean isMaster = false;
- public MappingSystem(ILispDAO dao, boolean iterateMask, boolean notifications, boolean overwrite) {
+ private ISouthBoundMappingTimeoutService sbMappingTimeoutService;
+
+ public MappingSystem(ILispDAO dao, boolean iterateMask, NotificationPublishService nps, boolean mappingMerge) {
this.dao = dao;
- this.iterateMask = iterateMask;
- this.notificationService = notifications;
- this.overwrite = overwrite;
+ this.notificationPublishService = nps;
+ this.mappingMerge = mappingMerge;
buildMapCaches();
+
+ sbMappingTimeoutService = new TimeBucketMappingTimeoutService(ConfigIni.getInstance()
+ .getNumberOfBucketsInTimeBucketWheel(), ConfigIni.getInstance().getRegistrationValiditySb(),
+ this);
}
public void setDataStoreBackEnd(DataStoreBackEnd dsbe) {
}
@Override
- public void setOverwritePolicy(boolean overwrite) {
- this.overwrite = overwrite;
+ public void setMappingMerge(boolean mappingMerge) {
+ this.mappingMerge = mappingMerge;
}
@Override
public void setIterateMask(boolean iterate) {
- this.iterateMask = iterate;
- if (smc != null || pmc != null) {
- buildMapCaches();
- }
+ LOG.error("Non-longest prefix match lookups are not properly supported, variable is set to true");
}
public void initialize() {
private void buildMapCaches() {
/*
* There exists a direct relationship between MappingOrigins and the tables that are part of the MappingSystem.
- * Therefore, if a new origin is added, probably a new table should be instantiate here as well.
+ * Therefore, if a new origin is added, probably a new table should be instantiated here as well. Here we
+ * instantiate a SimpleMapCache for southbound originated LISP mappings and a MultiTableMapCache for northbound
+ * originated mappings. Use of FlatMapCache would be possible when no longest prefix match is needed at all,
+ * but that option is no longer supported in the code, since it was never tested and may lead to unexpected
+ * results.
*/
- if (iterateMask) {
- smc = new SimpleMapCache(dao.putTable(MappingOrigin.Southbound.toString()));
- pmc = new MultiTableMapCache(dao.putTable(MappingOrigin.Northbound.toString()));
- } else {
- smc = new FlatMapCache(dao.putTable(MappingOrigin.Southbound.toString()));
- pmc = new FlatMapCache(dao.putTable(MappingOrigin.Northbound.toString()));
- }
+ sdao = dao.putTable(MappingOrigin.Southbound.toString());
+ pmc = new MultiTableMapCache(dao.putTable(MappingOrigin.Northbound.toString()));
+ smc = new SimpleMapCache(sdao);
+ akdb = new AuthKeyDb(dao.putTable(AUTH_KEY_TABLE));
tableMap.put(MappingOrigin.Northbound, pmc);
tableMap.put(MappingOrigin.Southbound, smc);
}
- public void addMapping(MappingOrigin origin, Eid key, Object value, boolean merge) {
- tableMap.get(origin).addMapping(key, value, origin == MappingOrigin.Southbound ? overwrite : true, merge);
+ public void addMapping(MappingOrigin origin, Eid key, MappingData mappingData) {
+
+ sbMappingTimeoutService.removeExpiredMappings();
+
+ if (mappingData == null) {
+ LOG.warn("addMapping() called with null mapping, ignoring");
+ return;
+ }
+
+ if (origin == MappingOrigin.Southbound) {
+ XtrId xtrId = mappingData.getXtrId();
+ if (xtrId == null && mappingMerge && mappingData.isMergeEnabled()) {
+ LOG.warn("addMapping() called will null xTR-ID in MappingRecord, while merge is set, ignoring");
+ return;
+ }
+ if (xtrId != null && mappingMerge) {
+ if (mappingData.isMergeEnabled()) {
+ smc.addMapping(key, xtrId, mappingData);
+ handleMergedMapping(key);
+ return;
+ } else {
+ clearPresentXtrIdMappings(key);
+ smc.addMapping(key, xtrId, mappingData);
+ }
+ }
+ addOrRefreshMappingInTimeoutService(key, mappingData);
+ }
+
+ tableMap.get(origin).addMapping(key, mappingData);
}
- public void updateMappingRegistration(MappingOrigin origin, Eid key, Long timestamp) {
- tableMap.get(origin).updateMappingRegistration(key, timestamp);
+ private void clearPresentXtrIdMappings(Eid key) {
+ List<MappingData> allXtrMappingList = (List<MappingData>) (List<?>) smc.getAllXtrIdMappings(key);
+
+ if (((MappingData) smc.getMapping(key, (XtrId) null)).isMergeEnabled()) {
+ LOG.trace("Different xTRs have different merge configuration!");
+ }
+
+ for (MappingData mappingData : allXtrMappingList) {
+ removeSbXtrIdSpecificMapping(key, mappingData.getXtrId(), mappingData);
+ }
}
- private MappingRecord updateServicePathMappingRecord(MappingRecord mapping, Eid eid) {
+ private void addOrRefreshMappingInTimeoutService(Eid key, MappingData mappingData) {
+ Integer oldBucketId = (Integer) smc.getData(key, SubKeys.TIME_BUCKET_ID);
+ Integer updatedBucketId;
+
+ if (oldBucketId != null) {
+ //refresh mapping
+ updatedBucketId = sbMappingTimeoutService.refreshMapping(key, mappingData, oldBucketId);
+ } else {
+ updatedBucketId = sbMappingTimeoutService.addMapping(key, mappingData);
+ }
+
+ smc.addData(key, SubKeys.TIME_BUCKET_ID, updatedBucketId);
+ }
+
+ @Override
+ public MappingData addNegativeMapping(Eid key) {
+ MappingRecord mapping = buildNegativeMapping(key);
+ MappingData mappingData = new MappingData(mapping);
+ smc.addMapping(mapping.getEid(), mappingData);
+ dsbe.addMapping(DSBEInputUtil.toMapping(MappingOrigin.Southbound, mapping.getEid(), null, mappingData));
+ return mappingData;
+ }
+
+ private MappingRecord buildNegativeMapping(Eid eid) {
+ MappingRecordBuilder recordBuilder = new MappingRecordBuilder();
+ recordBuilder.setAuthoritative(false);
+ recordBuilder.setMapVersion((short) 0);
+ recordBuilder.setEid(eid);
+ if (eid.getAddressType().equals(Ipv4PrefixBinaryAfi.class)
+ || eid.getAddressType().equals(Ipv6PrefixBinaryAfi.class)) {
+ Eid widestNegativePrefix = getWidestNegativePrefix(eid);
+ if (widestNegativePrefix != null) {
+ recordBuilder.setEid(widestNegativePrefix);
+ }
+ }
+ recordBuilder.setAction(LispMessage.NEGATIVE_MAPPING_ACTION);
+ if (getAuthenticationKey(eid) != null) {
+ recordBuilder.setRecordTtl(TTL_RLOC_TIMED_OUT);
+ } else {
+ recordBuilder.setRecordTtl(TTL_NO_RLOC_KNOWN);
+ }
+ return recordBuilder.build();
+ }
+
+ /*
+ * Since this method is only called when there is a hit in the southbound Map-Register cache, and that cache is
+ * not used when merge is on, it's OK to ignore the effects of timestamp changes on merging for now.
+ */
+ public void refreshMappingRegistration(Eid key, XtrId xtrId, Long timestamp) {
+
+ sbMappingTimeoutService.removeExpiredMappings();
+
+ if (timestamp == null) {
+ timestamp = System.currentTimeMillis();
+ }
+ MappingData mappingData = (MappingData) smc.getMapping(null, key);
+ if (mappingData != null) {
+ mappingData.setTimestamp(new Date(timestamp));
+ addOrRefreshMappingInTimeoutService(key, mappingData);
+ } else {
+ LOG.warn("Could not update timestamp for EID {}, no mapping found", LispAddressStringifier.getString(key));
+ }
+ if (mappingMerge && xtrId != null) {
+ MappingData xtrIdMappingData = (MappingData) smc.getMapping(key, xtrId);
+ if (xtrIdMappingData != null) {
+ xtrIdMappingData.setTimestamp(new Date(timestamp));
+ } else {
+ LOG.warn("Could not update timestamp for EID {} xTR-ID {}, no mapping found",
+ LispAddressStringifier.getString(key), LispAddressStringifier.getString(xtrId));
+ }
+ }
+ }
+
+ private MappingData updateServicePathMappingRecord(MappingData mappingData, Eid eid) {
// keep properties of original record
- MappingRecordBuilder recordBuilder = new MappingRecordBuilder(mapping);
+ MappingRecordBuilder recordBuilder = new MappingRecordBuilder(mappingData.getRecord());
recordBuilder.setLocatorRecord(new ArrayList<LocatorRecord>());
// there should only be one locator record
- if (mapping.getLocatorRecord().size() != 1) {
+ if (mappingData.getRecord().getLocatorRecord().size() != 1) {
LOG.warn("MappingRecord associated to ServicePath EID has more than one locator!");
- return mapping;
+ return mappingData;
}
- LocatorRecord locatorRecord = mapping.getLocatorRecord().get(0);
+ LocatorRecord locatorRecord = mappingData.getRecord().getLocatorRecord().get(0);
long serviceIndex = ((ServicePath) eid.getAddress()).getServicePath().getServiceIndex();
int index = LispAddressUtil.STARTING_SERVICE_INDEX - (int) serviceIndex;
Rloc rloc = locatorRecord.getRloc();
if (index != 0) {
LOG.warn("Service Index should be 255 for simple IP RLOCs!");
}
- return mapping;
+ return mappingData;
} else if (rloc.getAddress() instanceof ExplicitLocatorPath) {
ExplicitLocatorPath elp = (ExplicitLocatorPath) rloc.getAddress();
List<Hop> hops = elp.getExplicitLocatorPath().getHop();
if (index < 0 || index > hops.size()) {
LOG.warn("Service Index out of bounds!");
- return mapping;
+ return mappingData;
}
SimpleAddress nextHop = hops.get(index).getAddress();
LocatorRecordBuilder lrb = new LocatorRecordBuilder(locatorRecord);
lrb.setRloc(LispAddressUtil.toRloc(nextHop));
recordBuilder.getLocatorRecord().add(lrb.build());
- return recordBuilder.build();
+ return new MappingData(recordBuilder.build());
} else {
LOG.warn("Nothing to do with ServicePath mapping record");
- return mapping;
+ return mappingData;
}
}
+ private MappingData handleMergedMapping(Eid key) {
+ List<MappingData> expiredMappingDataList = new ArrayList<>();
+ Set<IpAddressBinary> sourceRlocs = new HashSet<>();
+
+ MappingData mergedMappingData = MappingMergeUtil.mergeXtrIdMappings(smc.getAllXtrIdMappings(key),
+ expiredMappingDataList, sourceRlocs);
+
+ for (MappingData mappingData : expiredMappingDataList) {
+ removeSbXtrIdSpecificMapping(key, mappingData.getXtrId(), mappingData);
+ }
+
+ if (mergedMappingData != null) {
+ smc.addMapping(key, mergedMappingData, sourceRlocs);
+ dsbe.addMapping(DSBEInputUtil.toMapping(MappingOrigin.Southbound, key, mergedMappingData));
+ addOrRefreshMappingInTimeoutService(key, mergedMappingData);
+ } else {
+ removeSbMapping(key, mergedMappingData);
+ }
+ return mergedMappingData;
+ }
+
@Override
- public Object getMapping(Eid src, Eid dst) {
+ public MappingData getMapping(Eid src, Eid dst) {
// NOTE: Currently we have two lookup algorithms implemented, which are configurable
if (ConfigIni.getInstance().getLookupPolicy() == IMappingService.LookupPolicy.NB_AND_SB) {
}
@Override
- public Object getMapping(Eid dst) {
- return getMapping((Eid)null, dst);
+ public MappingData getMapping(Eid dst) {
+ return getMapping((Eid) null, dst);
+ }
+
+ @Override
+ public MappingData getMapping(Eid src, Eid dst, XtrId xtrId) {
+    // Note: if xtrId is null, we need to go through regular policy checking; otherwise, policy doesn't matter
+
+ if (xtrId == null) {
+ return getMapping(src, dst);
+ }
+
+ return getSbMappingWithExpiration(src, dst, xtrId);
}
@Override
- public Object getMapping(MappingOrigin origin, Eid key) {
+ public MappingData getMapping(MappingOrigin origin, Eid key) {
if (origin.equals(MappingOrigin.Southbound)) {
- return getSbMappingWithExpiration(null, key);
+ return getSbMappingWithExpiration(null, key, null);
}
- return tableMap.get(origin).getMapping(null, key);
+ return (MappingData) tableMap.get(origin).getMapping(null, key);
}
- private Object getMappingNbFirst(Eid src, Eid dst) {
+ private MappingData getMappingNbFirst(Eid src, Eid dst) {
// Default lookup policy is northboundFirst
//lookupPolicy == NB_FIRST
- Object nbMapping = pmc.getMapping(src, dst);
+ MappingData nbMappingData = (MappingData) pmc.getMapping(src, dst);
- if (nbMapping == null) {
- return getSbMappingWithExpiration(src, dst);
+ if (nbMappingData == null) {
+ return getSbMappingWithExpiration(src, dst, null);
}
if (dst.getAddress() instanceof ServicePath) {
- return updateServicePathMappingRecord((MappingRecord) nbMapping, dst);
+ return updateServicePathMappingRecord(nbMappingData, dst);
}
- return nbMapping;
+ return nbMappingData;
}
- private Object getMappingNbSbIntersection(Eid src, Eid dst) {
+ private MappingData getMappingNbSbIntersection(Eid src, Eid dst) {
//lookupPolicy == NB_AND_SB, we return intersection
//of NB and SB mappings, or NB mapping if intersection is empty.
- Object nbMapping = pmc.getMapping(src, dst);
- if (nbMapping == null) {
- return nbMapping;
+ MappingData nbMappingData = (MappingData) pmc.getMapping(src, dst);
+ if (nbMappingData == null) {
+ return nbMappingData;
}
// no intersection for Service Path mappings
if (dst.getAddress() instanceof ServicePath) {
- return updateServicePathMappingRecord((MappingRecord)nbMapping, dst);
+ return updateServicePathMappingRecord(nbMappingData, dst);
}
- Object sbMapping = getSbMappingWithExpiration(src, dst);
- if (sbMapping == null) {
- return nbMapping;
+ MappingData sbMappingData = getSbMappingWithExpiration(src, dst, null);
+ if (sbMappingData == null) {
+ return nbMappingData;
}
// both NB and SB mappings exist. Compute intersection of the mappings
- return MappingMergeUtil.computeNbSbIntersection((MappingRecord)nbMapping, (MappingRecord)sbMapping);
+ return MappingMergeUtil.computeNbSbIntersection(nbMappingData, sbMappingData);
}
- private Object getSbMappingWithExpiration(Eid src, Eid dst) {
- Object mappingObject = smc.getMapping(src, dst);
- if (mappingObject instanceof MappingRecord) {
- MappingRecord mapping = (MappingRecord) mappingObject;
- if (MappingMergeUtil.mappingIsExpired(mapping)) {
- dsbe.removeMapping(DSBEInputUtil.toMapping(MappingOrigin.Southbound, mapping.getEid(),
- new SiteId(mapping.getSiteId()), mapping));
- return null;
- }
+ private MappingData getSbMappingWithExpiration(Eid src, Eid dst, XtrId xtrId) {
+ MappingData mappingData = (MappingData) smc.getMapping(dst, xtrId);
+ if (mappingData != null && MappingMergeUtil.mappingIsExpired(mappingData)) {
+ return handleSbExpiredMapping(dst, xtrId, mappingData);
+ } else {
+ return mappingData;
+ }
+ }
+
+ public MappingData handleSbExpiredMapping(Eid key, XtrId xtrId, MappingData mappingData) {
+ if (mappingMerge && mappingData.isMergeEnabled()) {
+ return handleMergedMapping(key);
+ }
+
+ if (xtrId != null) {
+ removeSbXtrIdSpecificMapping(key, xtrId, mappingData);
+ } else {
+ removeSbMapping(key, mappingData);
+ }
+ return null;
+ }
+
+ private void removeSbXtrIdSpecificMapping(Eid key, XtrId xtrId, MappingData mappingData) {
+ smc.removeMapping(key, xtrId);
+ dsbe.removeXtrIdMapping(DSBEInputUtil.toXtrIdMapping(mappingData));
+ }
+
+ private void removeSbMapping(Eid key, MappingData mappingData) {
+ if (mappingData != null && mappingData.getXtrId() != null) {
+ removeSbXtrIdSpecificMapping(key, mappingData.getXtrId(), mappingData);
+ }
+ removeFromSbTimeoutService(key);
+ smc.removeMapping(key);
+ dsbe.removeMapping(DSBEInputUtil.toMapping(MappingOrigin.Southbound, key, mappingData));
+ }
+
+ private void removeFromSbTimeoutService(Eid key) {
+ Integer bucketId = (Integer) smc.getData(key, SubKeys.TIME_BUCKET_ID);
+ if (bucketId != null) {
+ sbMappingTimeoutService.removeMappingFromTimeoutService(key, bucketId);
}
- return mappingObject;
}
@Override
@Override
public void removeMapping(MappingOrigin origin, Eid key) {
- tableMap.get(origin).removeMapping(key, origin == MappingOrigin.Southbound ? overwrite : true);
- if (notificationService) {
- // TODO
+ Set<Subscriber> subscribers = null;
+ if (origin == MappingOrigin.Southbound) {
+ removeFromSbTimeoutService(key);
+ MappingData mapping = (MappingData) smc.getMapping(null, key);
+ if (mapping != null && !mapping.isNegative()) {
+ SimpleImmutableEntry<Eid, Set<Subscriber>> mergedNegativePrefix = computeMergedNegativePrefix(key);
+ if (mergedNegativePrefix != null) {
+ addNegativeMapping(mergedNegativePrefix.getKey());
+ subscribers = mergedNegativePrefix.getValue();
+ try {
+ notificationPublishService.putNotification(
+ MSNotificationInputUtil.toMappingChanged(mapping, subscribers, MappingChange.Created));
+ } catch (InterruptedException e) {
+ LOG.warn("Notification publication interrupted!");
+ }
+ }
+ }
}
+ tableMap.get(origin).removeMapping(key);
+ }
+
+ @SuppressWarnings("unchecked")
+ /*
+ * Returns the "merged" prefix and the subscribers of the prefixes that were merged.
+ */
+ private SimpleImmutableEntry<Eid, Set<Subscriber>> computeMergedNegativePrefix(Eid eid) {
+ // Variable to hold subscribers we collect along the way
+ Set<Subscriber> subscribers = null;
+
+ // If prefix sibling has a negative mapping, save its subscribers
+ Eid sibling = smc.getSiblingPrefix(eid);
+ MappingData mapping = (MappingData) smc.getMapping(null, sibling);
+ if (mapping != null && mapping.isNegative()) {
+ subscribers = (Set<Subscriber>) getData(MappingOrigin.Southbound, eid, SubKeys.SUBSCRIBERS);
+ } else {
+ return null;
+ }
+
+ Eid currentNode = sibling;
+ Eid previousNode = sibling;
+ while ((currentNode = smc.getVirtualParentSiblingPrefix(currentNode)) != null) {
+ mapping = (MappingData) smc.getMapping(null, currentNode);
+ if (mapping != null && mapping.isNegative()) {
+ subscribers.addAll((Set<Subscriber>)
+ getData(MappingOrigin.Southbound, currentNode, SubKeys.SUBSCRIBERS));
+ removeSbMapping(currentNode, mapping);
+ } else {
+ break;
+ }
+ previousNode = currentNode;
+ }
+ return new SimpleImmutableEntry<>(getVirtualParent(previousNode), subscribers);
+ }
+
+ private static Eid getVirtualParent(Eid eid) {
+ if (eid.getAddress() instanceof Ipv4PrefixBinary) {
+ Ipv4PrefixBinary prefix = (Ipv4PrefixBinary) eid.getAddress();
+ short parentPrefixLength = (short) (prefix.getIpv4MaskLength() - 1);
+ byte[] parentPrefix = MaskUtil.normalizeByteArray(prefix.getIpv4AddressBinary().getValue(),
+ parentPrefixLength);
+ return LispAddressUtil.asIpv4PrefixBinaryEid(eid, parentPrefix, parentPrefixLength);
+ } else if (eid.getAddress() instanceof Ipv6PrefixBinary) {
+ Ipv6PrefixBinary prefix = (Ipv6PrefixBinary) eid.getAddress();
+ short parentPrefixLength = (short) (prefix.getIpv6MaskLength() - 1);
+ byte[] parentPrefix = MaskUtil.normalizeByteArray(prefix.getIpv6AddressBinary().getValue(),
+ parentPrefixLength);
+ return LispAddressUtil.asIpv6PrefixBinaryEid(eid, parentPrefix, parentPrefixLength);
+ }
+ return null;
}
@Override
public void addAuthenticationKey(Eid key, MappingAuthkey authKey) {
LOG.debug("Adding authentication key '{}' with key-ID {} for {}", authKey.getKeyString(), authKey.getKeyType(),
LispAddressStringifier.getString(key));
- smc.addAuthenticationKey(key, authKey);
+ akdb.addAuthenticationKey(key, authKey);
}
@Override
if (LOG.isDebugEnabled()) {
LOG.debug("Retrieving authentication key for {}", LispAddressStringifier.getString(key));
}
- return smc.getAuthenticationKey(key);
+ return akdb.getAuthenticationKey(key);
}
@Override
if (LOG.isDebugEnabled()) {
LOG.debug("Removing authentication key for {}", LispAddressStringifier.getString(key));
}
- smc.removeAuthenticationKey(key);
+ akdb.removeAuthenticationKey(key);
}
@Override
tableMap.get(origin).removeData(key, subKey);
}
+ @Override
+ public Eid getParentPrefix(Eid key) {
+ return smc.getParentPrefix(key);
+ }
+
/**
* Restore all mappings and keys from mdsal datastore.
*/
private void restoreDaoFromDatastore() {
- List<Mapping> mappings = dsbe.getAllMappings();
List<AuthenticationKey> authKeys = dsbe.getAllAuthenticationKeys();
+ List<Mapping> mappings = dsbe.getAllMappings(LogicalDatastoreType.CONFIGURATION);
+
+ /*
+ * XXX By default, the operational datastore is not persisted to disk, either at run-time, or on shutdown,
+ * so the following will have no effect (getLastUpdateTimestamp() will fail, since it's reading from
+ * the operational datastore, and even if it didn't, getAllMappings() will fail anyway). According to rovarga it
+ * should be possible to turn on persistence for the operational datastore editing
+ * etc/opendaylight/karaf/05-clustering.xml, by setting <persistence>true</persistence>. At the time of writing
+ * the below code block that didn't seem to work though.
+ */
+ Long lastUpdateTimestamp = dsbe.getLastUpdateTimestamp();
+ if (lastUpdateTimestamp != null && System.currentTimeMillis() - lastUpdateTimestamp
+ > ConfigIni.getInstance().getRegistrationValiditySb()) {
+ LOG.warn("Restore threshold passed, not restoring operational datastore into DAO");
+ } else {
+ mappings.addAll(dsbe.getAllMappings(LogicalDatastoreType.OPERATIONAL));
+ }
+ dsbe.removeLastUpdateTimestamp();
LOG.info("Restoring {} mappings and {} keys from datastore into DAO", mappings.size(), authKeys.size());
- int expiredMappings = 0;
for (Mapping mapping : mappings) {
- if (MappingMergeUtil.mappingIsExpired(mapping.getMappingRecord())) {
- dsbe.removeMapping(mapping);
- expiredMappings++;
- continue;
- }
- addMapping(mapping.getOrigin(), mapping.getMappingRecord().getEid(), mapping.getMappingRecord(), false);
+ addMapping(mapping.getOrigin(), mapping.getMappingRecord().getEid(),
+ new MappingData(mapping.getMappingRecord()));
}
- LOG.info("{} mappings were expired and were not restored", expiredMappings);
for (AuthenticationKey authKey : authKeys) {
addAuthenticationKey(authKey.getEid(), authKey.getMappingAuthkey());
public void destroy() {
LOG.info("Mapping System is being destroyed!");
+ dsbe.saveLastUpdateTimestamp();
}
@Override
return sb.toString();
}
+ @Override
+ public String printKeys() {
+ return akdb.printKeys();
+ }
+
public void cleanCaches() {
dao.removeAll();
buildMapCaches();
}
+
+ /*
+    * XXX Mappings and keys should be separated for this to work properly; as is, it will remove northbound originated
+ * authentication keys too, since they are currently stored in smc.
+ */
+ public void cleanSBMappings() {
+ smc = new SimpleMapCache(sdao);
+ }
+
+ @Override
+ public void setIsMaster(boolean isMaster) {
+ this.isMaster = isMaster;
+ }
+
+ @Override
+ public boolean isMaster() {
+ return isMaster;
+ }
}