<tag>HEAD</tag>
</scm>
<artifactId>arphandler</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<configuration>
<instructions>
<Import-Package>
+ org.opendaylight.controller.sal.packet.address,
org.opendaylight.controller.connectionmanager,
org.opendaylight.controller.sal.connection,
org.opendaylight.controller.sal.core,
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.clustering.services.IClusterServices;
import org.opendaylight.controller.connectionmanager.IConnectionManager;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.IfHostListener;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
private BlockingQueue<ARPCacheEvent> ARPCacheEvents = new LinkedBlockingQueue<ARPCacheEvent>();
private Thread cacheEventHandler;
private boolean stopping = false;
+
/*
* A cluster allocated cache. Used for synchronizing ARP request/reply
- * events across all cluster controllers. To raise an event, we put() a specific
- * event object (as key) and all nodes handle it in the entryUpdated callback.
+ * events across all cluster controllers. To raise an event, we put() a
+ * specific event object (as key) and all nodes handle it in the
+ * entryUpdated callback.
*
* In case of ARPReply, we put true value to send replies to any requestors
* by calling generateAndSendReply
*/
private ConcurrentMap<ARPEvent, Boolean> arpRequestReplyEvent;
- void setConnectionManager(IConnectionManager cm){
+ void setConnectionManager(IConnectionManager cm) {
this.connectionManager = cm;
}
- void unsetConnectionManager(IConnectionManager cm){
- if (this.connectionManager == cm){
+ void unsetConnectionManager(IConnectionManager cm) {
+ if (this.connectionManager == cm) {
connectionManager = null;
}
}
- void setClusterContainerService(IClusterContainerServices s){
+ void setClusterContainerService(IClusterContainerServices s) {
this.clusterContainerService = s;
}
}
}
- protected void sendARPReply(NodeConnector p, byte[] sMAC, InetAddress sIP,
- byte[] tMAC, InetAddress tIP) {
+ protected void sendARPReply(NodeConnector p, byte[] sMAC, InetAddress sIP, byte[] tMAC, InetAddress tIP) {
byte[] senderIP = sIP.getAddress();
byte[] targetIP = tIP.getAddress();
- ARP arp = createARP(ARP.REPLY,sMAC,senderIP,tMAC,targetIP);
+ ARP arp = createARP(ARP.REPLY, sMAC, senderIP, tMAC, targetIP);
Ethernet ethernet = createEthernet(sMAC, tMAC, arp);
// Make sure that the host is a legitimate member of this subnet
if (!subnet.hasNodeConnector(p)) {
- log.debug("{} showing up on {} does not belong to {}",
- new Object[] { sourceIP, p, subnet });
+ log.debug("{} showing up on {} does not belong to {}", new Object[] { sourceIP, p, subnet });
return;
}
}
/*
- * OpCode != request -> ARP Reply. If there are hosts (in
- * arpRequestors) waiting for the ARP reply for this sourceIP, it's
- * time to generate the reply and send it to these hosts.
+ * OpCode != request -> ARP Reply. If there are hosts (in arpRequestors)
+ * waiting for the ARP reply for this sourceIP, it's time to generate
+ * the reply and send it to these hosts.
*
* If sourceIP==targetIP, it is a Gratuitous ARP. If there are hosts (in
* arpRequestors) waiting for the ARP reply for this sourceIP, it's time
*/
if (pkt.getOpCode() != ARP.REQUEST || sourceIP.equals(targetIP)) {
- // Raise a reply event so that any waiting requestors will be sent a reply
- // the true value indicates we should generate replies to requestors across the cluster
+ // Raise a reply event so that any waiting requestors will be sent a
+ // reply
+ // the true value indicates we should generate replies to requestors
+ // across the cluster
log.trace("Received ARP reply packet from {}, reply to all requestors.", sourceIP);
arpRequestReplyEvent.put(new ARPReply(sourceIP, sourceMAC), true);
return;
}
/*
- * ARP Request Handling:
- * If targetIP is the IP of the subnet, reply with ARP REPLY
- * If targetIP is a known host, PROXY ARP (by sending ARP REPLY) on behalf of known target hosts.
- * For unknown target hosts, generate and send an ARP request to ALL switches/ports using
- * the IP address defined in the subnet as source address
+ * ARP Request Handling: If targetIP is the IP of the subnet, reply with
+ * ARP REPLY. If targetIP is a known host, PROXY ARP (by sending ARP
+ * REPLY) on behalf of known target hosts. For unknown target hosts,
+ * generate and send an ARP request to ALL switches/ports using the IP
+ * address defined in the subnet as source address.
*/
/*
* If target IP is gateway IP, Send ARP reply
*/
if ((targetIP.equals(subnet.getNetworkAddress()))
&& (NetUtils.isBroadcastMACAddr(targetMAC) || Arrays.equals(targetMAC, getControllerMAC()))) {
- if (connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL){
- if (log.isTraceEnabled()){
+ if (connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL) {
+ if (log.isTraceEnabled()) {
log.trace("Received local ARP req. for default gateway. Replying with controller MAC: {}",
HexEncode.bytesToHexString(getControllerMAC()));
}
return;
}
-
- HostNodeConnector host = hostTracker.hostQuery(targetIP);
+ // Hosttracker hosts db key implementation
+ IHostId id = HostIdFactory.create(targetIP, null);
+ HostNodeConnector host = hostTracker.hostQuery(id);
// unknown host, initiate ARP request
if (host == null) {
// add the requestor to the list so that we can replay the reply
arpRequestors.put(targetIP, requestorSet);
}
requestorSet.add(requestor);
- countDownTimers.put(targetIP, (short) 2); // reset timeout to 2sec
+ countDownTimers.put(targetIP, (short) 2); // reset timeout to
+ // 2sec
}
- //Raise a bcast request event, all controllers need to send one
+ // Raise a bcast request event, all controllers need to send one
log.trace("Sending a bcast ARP request for {}", targetIP);
arpRequestReplyEvent.put(new ARPRequest(targetIP, subnet), false);
} else {
/*
- * Target host known (across the cluster), send ARP REPLY make sure that targetMAC
- * matches the host's MAC if it is not broadcastMAC
+ * Target host known (across the cluster), send ARP REPLY make sure
+ * that targetMAC matches the host's MAC if it is not broadcastMAC
*/
if (NetUtils.isBroadcastMACAddr(targetMAC) || Arrays.equals(host.getDataLayerAddressBytes(), targetMAC)) {
log.trace("Received ARP req. for known host {}, sending reply...", targetIP);
if (connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL) {
- sendARPReply(p,
- host.getDataLayerAddressBytes(),
- host.getNetworkAddress(),
- pkt.getSenderHardwareAddress(),
- sourceIP);
+ sendARPReply(p, host.getDataLayerAddressBytes(), host.getNetworkAddress(),
+ pkt.getSenderHardwareAddress(), sourceIP);
} else {
- arpRequestReplyEvent.put(new ARPReply(
- p,
- host.getNetworkAddress(),
- host.getDataLayerAddressBytes(),
- sourceIP,
- pkt.getSenderHardwareAddress()), false);
+ arpRequestReplyEvent.put(new ARPReply(p, host.getNetworkAddress(), host.getDataLayerAddressBytes(),
+ sourceIP, pkt.getSenderHardwareAddress()), false);
}
} else {
/*
- * Target MAC has been changed. For now, discard it.
- * TODO: We may need to send unicast ARP REQUEST on behalf of
- * the target back to the sender to trigger the sender to update
- * its table
+ * Target MAC has been changed. For now, discard it. TODO: We
+ * may need to send unicast ARP REQUEST on behalf of the target
+ * back to the sender to trigger the sender to update its table
*/
}
}
}
/**
- * Send a broadcast ARP Request to the switch/ ports using
- * the networkAddress of the subnet as sender IP
- * the controller's MAC as sender MAC
- * the targetIP as the target Network Address
+ * Send a broadcast ARP Request to the switch/ports using: the
+ * networkAddress of the subnet as sender IP, the controller's MAC as
+ * sender MAC, and the targetIP as the target Network Address.
*/
protected void sendBcastARPRequest(InetAddress targetIP, Subnet subnet) {
log.trace("sendBcatARPRequest targetIP:{} subnet:{}", targetIP, subnet);
}
byte[] targetHardwareAddress = new byte[] { (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0 };
- //TODO: should use IBroadcastHandler instead
+ // TODO: should use IBroadcastHandler instead
for (NodeConnector p : nodeConnectors) {
- //filter out any non-local or internal ports
- if (!(connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL) || topologyManager.isInternal(p)) {
+ // filter out any non-local or internal ports
+ if (!(connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL)
+ || topologyManager.isInternal(p)) {
continue;
}
log.trace("Sending toward nodeConnector:{}", p);
/**
* Send a unicast ARP Request to the known host on a specific switch/port as
- * defined in the host.
- * The sender IP is the networkAddress of the subnet
+ * defined in the host. The sender IP is the networkAddress of the subnet.
* The sender MAC is the controller's MAC
*/
protected void sendUcastARPRequest(HostNodeConnector host, Subnet subnet) {
Subnet subnet = null;
if (switchManager != null) {
- subnet = switchManager.getSubnetByNetworkAddress(host
- .getNetworkAddress());
+ subnet = switchManager.getSubnetByNetworkAddress(host.getNetworkAddress());
}
if (subnet == null) {
log.debug("can't find subnet matching {}", host.getNetworkAddress());
return;
}
- if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL){
+ if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL) {
log.trace("Send a ucast ARP req. to: {}", host);
sendUcastARPRequest(host, subnet);
} else {
}
/**
- * An IP packet is punted to the controller, this means that the
- * destination host is not known to the controller.
- * Need to discover it by sending a Broadcast ARP Request
+ * An IP packet is punted to the controller, this means that the destination
+ * host is not known to the controller. Need to discover it by sending a
+ * Broadcast ARP Request
*
* @param pkt
* @param p
InetAddress dIP = NetUtils.getInetAddress(pkt.getDestinationAddress());
if (dIP == null) {
- return;
+ return;
}
// try to find a matching subnet
}
// see if we know about the host
- HostNodeConnector host = hostTracker.hostFind(dIP);
+ // Hosttracker hosts db key implementation
+ IHostId id = HostIdFactory.create(dIP, null);
+ HostNodeConnector host = hostTracker.hostFind(id);
if (host == null) {
// if we don't, know about the host, try to find it
- log.trace("Punted IP pkt to {}, sending bcast ARP event...",
- dIP);
+ log.trace("Punted IP pkt to {}, sending bcast ARP event...", dIP);
/*
* unknown destination host, initiate bcast ARP request
*/
arpRequestReplyEvent.put(new ARPRequest(dIP, subnet), false);
- } else if (routing == null ||
- routing.getRoute(p.getNode(), host.getnodeconnectorNode()) != null) {
- /* if IRouting is available, make sure that this packet can get it's
+ } else if (routing == null || routing.getRoute(p.getNode(), host.getnodeconnectorNode()) != null) {
+ /*
+ * if IRouting is available, make sure that this packet can get it's
* destination normally before teleporting it there. If it's not
* available, then assume it's reachable.
*
log.trace("forwarding punted IP pkt to {} received at {}", dIP, p);
- /* if we know where the host is and there's a path from where this
+ /*
+ * if we know where the host is and there's a path from where this
* packet was punted to where the host is, then deliver it to the
- * host for now */
+ * host for now
+ */
NodeConnector nc = host.getnodeConnector();
// re-encode the Ethernet packet (the parent of the IPv4 packet)
rp.setOutgoingNodeConnector(nc);
this.dataPacketService.transmitDataPacket(rp);
} else {
- log.trace("ignoring punted IP pkt to {} because there is no route from {}",
- dIP, p);
+ log.trace("ignoring punted IP pkt to {} because there is no route from {}", dIP, p);
}
}
allocateCaches();
retrieveCaches();
+
}
@SuppressWarnings({ "unchecked" })
private void retrieveCaches() {
- ConcurrentMap<?,?> map;
+ ConcurrentMap<?, ?> map;
- if (this.clusterContainerService == null){
+ if (this.clusterContainerService == null) {
log.error("Cluster service unavailable, can't retieve ARPHandler caches!");
return;
}
map = clusterContainerService.getCache(ARP_EVENT_CACHE_NAME);
- if (map != null){
+ if (map != null) {
this.arpRequestReplyEvent = (ConcurrentMap<ARPEvent, Boolean>) map;
} else {
log.error("Cache allocation failed for {}", ARP_EVENT_CACHE_NAME);
}
private void allocateCaches() {
- if (clusterContainerService == null){
+ if (clusterContainerService == null) {
nonClusterObjectCreate();
log.error("Clustering service unavailable. Allocated non-cluster caches for ARPHandler.");
return;
}
- try{
+ try {
clusterContainerService.createCache(ARP_EVENT_CACHE_NAME,
EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
- } catch (CacheConfigException e){
+ } catch (CacheConfigException e) {
log.error("ARPHandler cache configuration invalid!");
- } catch (CacheExistException e){
+ } catch (CacheExistException e) {
log.debug("ARPHandler cache exists, skipped allocation.");
}
}
- private void nonClusterObjectCreate(){
+ private void nonClusterObjectCreate() {
arpRequestReplyEvent = new ConcurrentHashMap<ARPEvent, Boolean>();
}
+
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
*
*/
void destroy() {
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
*
*/
void start() {
}
/**
- * Function called by the dependency manager before the services
- * exported by the component are unregistered, this will be
- * followed by a "destroy ()" calls
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
*
*/
- void stop(){
+ void stop() {
}
void stopping() {
handlePuntedIPPacket((IPv4) nextPak, inPkt.getIncomingNodeConnector());
} else if (nextPak instanceof ARP) {
log.trace("Handle ARP packet: {}", formattedPak);
- handleARPPacket((Ethernet) formattedPak, (ARP) nextPak, inPkt
- .getIncomingNodeConnector());
+ handleARPPacket((Ethernet) formattedPak, (ARP) nextPak, inPkt.getIncomingNodeConnector());
}
}
return PacketResult.IGNORED;
}
- private ARP createARP(short opCode, byte[] senderMacAddress, byte[] senderIP, byte[] targetMacAddress, byte[] targetIP) {
- ARP arp = new ARP();
- arp.setHardwareType(ARP.HW_TYPE_ETHERNET);
- arp.setProtocolType(EtherTypes.IPv4.shortValue());
- arp.setHardwareAddressLength((byte) 6);
- arp.setProtocolAddressLength((byte) 4);
- arp.setOpCode(opCode);
- arp.setSenderHardwareAddress(senderMacAddress) ;
- arp.setSenderProtocolAddress(senderIP);
- arp.setTargetHardwareAddress(targetMacAddress);
- arp.setTargetProtocolAddress(targetIP);
- return arp;
+ private ARP createARP(short opCode, byte[] senderMacAddress, byte[] senderIP, byte[] targetMacAddress,
+ byte[] targetIP) {
+ ARP arp = new ARP();
+ arp.setHardwareType(ARP.HW_TYPE_ETHERNET);
+ arp.setProtocolType(EtherTypes.IPv4.shortValue());
+ arp.setHardwareAddressLength((byte) 6);
+ arp.setProtocolAddressLength((byte) 4);
+ arp.setOpCode(opCode);
+ arp.setSenderHardwareAddress(senderMacAddress);
+ arp.setSenderProtocolAddress(senderIP);
+ arp.setTargetHardwareAddress(targetMacAddress);
+ arp.setTargetProtocolAddress(targetIP);
+ return arp;
}
private Ethernet createEthernet(byte[] sourceMAC, byte[] targetMAC, ARP arp) {
// Clean up ARP event cache
try {
- if (clusterContainerService.amICoordinator() && ! arpRequestReplyEvent.isEmpty()){
+ if (clusterContainerService.amICoordinator() && !arpRequestReplyEvent.isEmpty()) {
arpRequestReplyEvent.clear();
}
- } catch (Exception e){
+ } catch (Exception e) {
log.warn("ARPHandler: A cluster member failed to clear event cache.");
}
}
private void generateAndSendReply(InetAddress sourceIP, byte[] sourceMAC) {
if (log.isTraceEnabled()) {
log.trace("generateAndSendReply called with params sourceIP:{} sourceMAC:{}", sourceIP,
- HexEncode.bytesToHexString(sourceMAC));
+ HexEncode.bytesToHexString(sourceMAC));
}
Set<HostNodeConnector> hosts = arpRequestors.remove(sourceIP);
if ((hosts == null) || hosts.isEmpty()) {
countDownTimers.remove(sourceIP);
for (HostNodeConnector host : hosts) {
if (log.isTraceEnabled()) {
- log.trace("Sending ARP Reply with src {}/{}, target {}/{}",
- new Object[] {
- HexEncode.bytesToHexString(sourceMAC),
- sourceIP,
- HexEncode.bytesToHexString(host.getDataLayerAddressBytes()),
- host.getNetworkAddress() });
+ log.trace(
+ "Sending ARP Reply with src {}/{}, target {}/{}",
+ new Object[] { HexEncode.bytesToHexString(sourceMAC), sourceIP,
+ HexEncode.bytesToHexString(host.getDataLayerAddressBytes()), host.getNetworkAddress() });
}
- if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL){
- sendARPReply(host.getnodeConnector(),
- sourceMAC,
- sourceIP,
- host.getDataLayerAddressBytes(),
+ if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL) {
+ sendARPReply(host.getnodeConnector(), sourceMAC, sourceIP, host.getDataLayerAddressBytes(),
host.getNetworkAddress());
} else {
/*
- * In the remote event a requestor moved to another
- * controller it may turn out it now we need to send
- * the ARP reply from a different controller, this
- * cover the case
+ * In the remote event that a requestor moved to another
+ * controller, we may now need to send the ARP reply from a
+ * different controller; this covers that case.
*/
arpRequestReplyEvent.put(
- new ARPReply(
- host.getnodeConnector(),
- sourceIP,
- sourceMAC,
- host.getNetworkAddress(),
- host.getDataLayerAddressBytes()), false);
+ new ARPReply(host.getnodeConnector(), sourceIP, sourceMAC, host.getNetworkAddress(), host
+ .getDataLayerAddressBytes()), false);
}
}
}
-
@Override
public void entryUpdated(ARPEvent key, Boolean new_value, String cacheName, boolean originLocal) {
log.trace("Got and entryUpdated for cacheName {} key {} isNew {}", cacheName, key, new_value);
public void entryCreated(ARPEvent key, String cacheName, boolean originLocal) {
// nothing to do
}
+
@Override
public void entryDeleted(ARPEvent key, String cacheName, boolean originLocal) {
// nothing to do
}
- private void enqueueARPCacheEvent (ARPEvent event, boolean new_value) {
+ private void enqueueARPCacheEvent(ARPEvent event, boolean new_value) {
try {
ARPCacheEvent cacheEvent = new ARPCacheEvent(event, new_value);
if (!ARPCacheEvents.contains(cacheEvent)) {
}
/*
- * this thread monitors the connectionEvent queue for new incoming events from
+ * this thread monitors the connectionEvent queue for new incoming events
+ * from
*/
private class ARPCacheEventHandler implements Runnable {
@Override
log.trace("Trigger and ARP Broadcast Request upon receipt of {}", req);
sendBcastARPRequest(req.getTargetIP(), req.getSubnet());
- //If unicast and local, send reply
+ // If unicast and local, send reply
} else if (connectionManager.getLocalityStatus(req.getHost().getnodeconnectorNode()) == ConnectionLocality.LOCAL) {
log.trace("ARPCacheEventHandler - sendUcatARPRequest upon receipt of {}", req);
sendUcastARPRequest(req.getHost(), req.getSubnet());
}
} else if (event instanceof ARPReply) {
ARPReply rep = (ARPReply) event;
- // New reply received by controller, notify all awaiting requestors across the cluster
+ // New reply received by controller, notify all awaiting
+ // requestors across the cluster
if (ev.isNewReply()) {
log.trace("Trigger a generateAndSendReply in response to {}", rep);
generateAndSendReply(rep.getTargetIP(), rep.getTargetMac());
- // Otherwise, a specific reply. If local, send out.
+ // Otherwise, a specific reply. If local, send out.
} else if (connectionManager.getLocalityStatus(rep.getPort().getNode()) == ConnectionLocality.LOCAL) {
log.trace("ARPCacheEventHandler - sendUcatARPReply locally in response to {}", rep);
- sendARPReply(rep.getPort(),
- rep.getSourceMac(),
- rep.getSourceIP(),
- rep.getTargetMac(),
+ sendARPReply(rep.getPort(), rep.getSourceMac(), rep.getSourceIP(), rep.getTargetMac(),
rep.getTargetIP());
}
}
<yangtools.binding.version>0.6.0-SNAPSHOT</yangtools.binding.version>
<!--versions for bits of the controller -->
<controller.version>0.4.1-SNAPSHOT</controller.version>
+ <hosttracker.version>0.5.1-SNAPSHOT</hosttracker.version>
+ <arphandler.version>0.5.1-SNAPSHOT</arphandler.version>
+ <forwarding.staticrouting>0.5.1-SNAPSHOT</forwarding.staticrouting>
+ <samples.loadbalancer>0.5.1-SNAPSHOT</samples.loadbalancer>
<config.version>0.2.3-SNAPSHOT</config.version>
<netconf.version>0.2.3-SNAPSHOT</netconf.version>
<mdsal.version>1.0-SNAPSHOT</mdsal.version>
*/
package org.opendaylight.controller.config.yang.logback.config;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.commons.lang3.StringUtils;
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DependencyResolverFactory;
+import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.osgi.framework.BundleContext;
+import org.slf4j.LoggerFactory;
+
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.core.rolling.FixedWindowRollingPolicy;
import ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy;
import ch.qos.logback.core.rolling.TimeBasedRollingPolicy;
+
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
-import org.apache.commons.lang3.StringUtils;
-import org.opendaylight.controller.config.api.DependencyResolver;
-import org.opendaylight.controller.config.api.ModuleIdentifier;
-import org.osgi.framework.BundleContext;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
+import com.google.common.collect.Sets;
/**
*
public class LogbackModuleFactory extends
org.opendaylight.controller.config.yang.logback.config.AbstractLogbackModuleFactory {
- private static final String INSTANCE_NAME = "singleton";
+ public static final String INSTANCE_NAME = "singleton";
private Map<String, LoggerTO> loggersDTOs;
private Map<String, RollingFileAppenderTO> rollingDTOs;
private Map<String, ConsoleAppenderTO> consoleDTOs;
private Map<String, FileAppenderTO> fileDTOs;
@Override
- public LogbackModule instantiateModule(String instanceName,
- DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ public LogbackModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
+ BundleContext bundleContext) {
Preconditions.checkArgument(instanceName.equals(INSTANCE_NAME),
"There should be just one instance of logback, named " + INSTANCE_NAME);
prepareDTOs();
}
@Override
- public LogbackModule instantiateModule(String instanceName,
- DependencyResolver dependencyResolver, LogbackModule oldModule,
- AutoCloseable oldInstance, BundleContext bundleContext) {
+ public LogbackModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
+ LogbackModule oldModule, AutoCloseable oldInstance, BundleContext bundleContext) {
Preconditions.checkArgument(instanceName.equals(INSTANCE_NAME),
"There should be just one instance of logback, named " + INSTANCE_NAME);
prepareDTOs();
return Lists.newArrayList(loggersToReturn.values());
}
+ @Override
+ public Set<LogbackModule> getDefaultModules(DependencyResolverFactory dependencyResolverFactory,
+ BundleContext bundleContext) {
+ DependencyResolver resolver = dependencyResolverFactory.createDependencyResolver(new ModuleIdentifier(
+ getImplementationName(), INSTANCE_NAME));
+ LogbackModule defaultLogback = instantiateModule(INSTANCE_NAME, resolver, bundleContext);
+ Set<LogbackModule> defaultModules = Sets.newHashSet(defaultLogback);
+ return defaultModules;
+ }
+
}
*/
package org.opendaylight.controller.config.yang.logback.config;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import javax.management.InstanceAlreadyExistsException;
-import javax.management.InstanceNotFoundException;
-import javax.management.ObjectName;
-
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
-import org.opendaylight.controller.config.api.ConflictingVersionException;
import org.opendaylight.controller.config.api.ValidationException;
import org.opendaylight.controller.config.api.jmx.CommitStatus;
import org.opendaylight.controller.config.manager.impl.AbstractConfigTest;
import org.opendaylight.controller.config.manager.impl.factoriesresolver.HardcodedModuleFactoriesResolver;
import org.opendaylight.controller.config.util.ConfigTransactionJMXClient;
+import javax.management.ObjectName;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+import static org.junit.matchers.JUnitMatchers.containsString;
+
public class LogbackModuleTest extends AbstractConfigTest {
private static final String INSTANCE_NAME = "singleton";
private LogbackModuleFactory factory;
@Before
- public void setUp() throws IOException, ClassNotFoundException, InterruptedException {
+ public void setUp() throws Exception {
factory = new LogbackModuleFactory();
super.initConfigTransactionManagerImpl(new HardcodedModuleFactoriesResolver(factory));
}
@Test
- public void testCreateBean() throws InstanceAlreadyExistsException {
+ public void testCreateBean() throws Exception {
CommitStatus status = createBeans(true, "target/rollingApp",
"%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5, "target/%i.log", "rolling",
}
@Test
- public void testReusingInstance() throws InstanceAlreadyExistsException {
+ public void testReusingInstance() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0,
"FileAppender").commit();
}
@Test
- public void testRecreateInstance() throws InstanceAlreadyExistsException, ValidationException,
- ConflictingVersionException, InstanceNotFoundException {
+ public void testRecreateInstance() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0,
"FileAppender").commit();
}
@Test
- public void testDestroyInstance() throws InstanceNotFoundException, InstanceAlreadyExistsException {
+ public void testDestroyInstance() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0,
"FileAppender").commit();
@Ignore
@Test
- public void testValidation1() throws InstanceAlreadyExistsException {
+ public void testValidation1() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy",
}
@Test
- public void testValidation2() throws InstanceAlreadyExistsException {
+ public void testValidation2() throws Exception {
try {
createBeans(true, "target/rollingApp", null, "30MB", 1, 5, "target/%i.log", "rolling", "consoleName",
"ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0, "FileAppender").commit();
}
@Test
- public void testValidation4() throws InstanceAlreadyExistsException {
+ public void testValidation4() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", null, 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy",
}
@Test
- public void testValidation6() throws InstanceAlreadyExistsException {
+ public void testValidation6() throws Exception {
try {
createBeans(true, "", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5, "target/%i.log",
"rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0, "FileAppender")
}
@Test
- public void testValidation7() throws InstanceAlreadyExistsException {
+ public void testValidation7() throws Exception {
try {
createBeans(
}
@Test
- public void testValidation8() throws InstanceAlreadyExistsException {
+ public void testValidation8() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", null, "DEBUG", "FixedWindowRollingPolicy", 0,
}
@Test
- public void testValidation9() throws InstanceAlreadyExistsException {
+ public void testValidation9() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "", "DEBUG", "FixedWindowRollingPolicy", 0,
}
@Test
- public void testValidation10() throws InstanceAlreadyExistsException {
+ public void testValidation10() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", null,
5, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG",
}
@Test
- public void testValidation11() throws InstanceAlreadyExistsException {
+ public void testValidation11() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG",
}
@Test
- public void testValidation12() throws InstanceAlreadyExistsException {
+ public void testValidation12() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", null, 1, "FileAppender")
}
@Test
- public void testValidation13() throws InstanceAlreadyExistsException {
+ public void testValidation13() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "", 1, "FileAppender")
}
@Test
- public void testValidation14() throws InstanceAlreadyExistsException {
+ public void testValidation14() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "RollingPolicy", 1,
}
@Test
- public void testTimeBasedRollingPolicy() throws InstanceAlreadyExistsException {
+ public void testTimeBasedRollingPolicy() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", null,
null, "target/%d.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "TimeBasedRollingPolicy",
1, "FileAppender").commit();
private ConfigTransactionJMXClient createBeans(Boolean isAppend, String rollingFileName, String encoderPattern,
String maxFileSize, Integer minIndex, Integer maxIndex, String fileNamePattern, String rollingName,
String consoleName, String thresholdFilter, String loggerName, String level, String rollingPolicyType,
- int maxHistory, String fileAppName) throws InstanceAlreadyExistsException {
+ int maxHistory, String fileAppName) throws Exception {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), INSTANCE_NAME);
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), INSTANCE_NAME);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
List<RollingFileAppenderTO> rollingAppenders = new ArrayList<>();
RollingFileAppenderTO rollingAppender = new RollingFileAppenderTO();
import java.util.List;
import javax.management.InstanceAlreadyExistsException;
+import javax.management.InstanceNotFoundException;
import javax.management.JMX;
+import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.commons.io.FileUtils;
}
- public ObjectName createBeans() throws JoranException, InstanceAlreadyExistsException, IOException {
+ public ObjectName createBeans() throws JoranException, InstanceAlreadyExistsException, IOException,
+ MalformedObjectNameException, InstanceNotFoundException {
LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
loggersDTOs.add(log);
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), "singleton");
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
bean.setLoggerTO(loggersDTOs);
bean.setRollingFileAppenderTO(rollingAppenders);
transaction.commit();
- return nameCreated;
+ return nameRetrieved;
}
}
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.JMX;
+import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.commons.io.FileUtils;
import org.opendaylight.controller.config.util.ConfigTransactionJMXClient;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.Lists;
-
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.joran.JoranConfigurator;
import ch.qos.logback.core.joran.spi.JoranException;
+import com.google.common.collect.Lists;
+
public class LogbackWithXmlConfigModuleTest extends AbstractConfigTest {
private LogbackModuleFactory factory;
/**
* Tests configuration of Logger factory.
+ *
+ * @throws MalformedObjectNameException
*/
@Test
- public void test() throws InstanceAlreadyExistsException, InstanceNotFoundException {
+ public void test() throws InstanceAlreadyExistsException, InstanceNotFoundException, MalformedObjectNameException {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), "singleton");
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
assertEquals(1, bean.getConsoleAppenderTO().size());
transaction = configRegistryClient.createTransaction();
- nameCreated = transaction.lookupConfigBean(factory.getImplementationName(), "singleton");
+ nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
- bean = JMX.newMXBeanProxy(platformMBeanServer, nameCreated, LogbackModuleMXBean.class);
+ bean = JMX.newMXBeanProxy(platformMBeanServer, nameRetrieved, LogbackModuleMXBean.class);
assertEquals(1, bean.getConsoleAppenderTO().size());
assertEquals(1, bean.getRollingFileAppenderTO().size());
@Test
public void testAllLoggers() throws InstanceAlreadyExistsException, InstanceNotFoundException {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- transaction.createModule(factory.getImplementationName(), "singleton");
-
- transaction.commit();
-
- transaction = configRegistryClient.createTransaction();
LogbackModuleMXBean bean = JMX.newMXBeanProxy(ManagementFactory.getPlatformMBeanServer(),
transaction.lookupConfigBean("logback", "singleton"), LogbackModuleMXBean.class);
/**
* Add new logger using FileAppender
+ *
+ * @throws MalformedObjectNameException
*/
@Test
- public void testAddNewLogger() throws InstanceAlreadyExistsException, InstanceNotFoundException {
+ public void testAddNewLogger() throws InstanceAlreadyExistsException, InstanceNotFoundException,
+ MalformedObjectNameException {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), "singleton");
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
assertEquals(5, bean.getLoggerTO().size());
transaction.commit();
transaction = configRegistryClient.createTransaction();
- nameCreated = transaction.lookupConfigBean(factory.getImplementationName(), "singleton");
- bean = JMX.newMXBeanProxy(platformMBeanServer, nameCreated, LogbackModuleMXBean.class);
+ nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
+ bean = JMX.newMXBeanProxy(platformMBeanServer, nameRetrieved, LogbackModuleMXBean.class);
assertEquals(6, bean.getLoggerTO().size());
}
// inconsistent state
inconsistentBundlesToYangURLs.putAll(bundle, addedURLs);
- logger.debug("Yang store is falling back on last consistent state containing {}, inconsistent yang files {}, reason {}",
+ logger.debug("Yang store is falling back on last consistent state containing {}, inconsistent yang files {}, reason {}",
consistentBundlesToYangURLs, inconsistentBundlesToYangURLs, failureReason);
logger.warn("Yang store is falling back on last consistent state containing {} files, inconsistent yang files size is {}, reason {}",
consistentBundlesToYangURLs.size(), inconsistentBundlesToYangURLs.size(), failureReason.toString());
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
* files through java serialization API when saving to/reading from Container
* Manager startup configuration file.
*/
-@XmlRootElement(name = "container-config")
+@XmlRootElement(name = "containerConfig")
@XmlAccessorType(XmlAccessType.NONE)
public class ContainerConfig implements Serializable {
private static final long serialVersionUID = 2L;
<artifactId>sal-common-util</artifactId>
<version>${mdsal.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-netconf-connector</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-core-api</artifactId>
<artifactId>forwardingrules-manager</artifactId>
<version>${mdsal.version}</version>
</dependency>
-
+ <dependency>
+ <groupId>org.opendaylight.controller.md</groupId>
+ <artifactId>statistics-manager</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>concepts</artifactId>
<artifactId>yang-jmx-generator</artifactId>
<version>${config.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-event-executor-config</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-threadgroup-config</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-api</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-impl</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-store-impl</artifactId>
+ <version>${config.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>yang-store-api</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>${controller.version}</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>arphandler</artifactId>
- <version>${controller.version}</version>
+ <version>${arphandler.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>${controller.version}</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>${controller.version}</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>samples.loadbalancer</artifactId>
- <version>${controller.version}</version>
+ <version>${samples.loadbalancer}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
# Logging configuration for Tomcat-JUL logging
java.util.logging.config.file=configuration/tomcat-logging.properties
+
+#Hosttracker hostsdb key scheme setting
+hosttracker.keyscheme=IP
urn:opendaylight:yang:extension:yang-ext?module=yang-ext&revision=2013-07-09
urn:opendaylight:params:xml:ns:yang:iana?module=iana&revision=2013-08-16
urn:opendaylight:params:xml:ns:yang:controller:md:sal:common?module=opendaylight-md-sal-common&revision=2013-10-28
-http://netconfcentral.org/ns/toaster?module=toaster&revision=2009-11-20
urn:opendaylight:params:xml:ns:yang:ieee754?module=ieee754&revision=2013-08-19
-urn:opendaylight:params:xml:ns:yang:nps-concepts?module=nps-concepts&revision=2013-09-30
//END OF CONFIG
<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
- <version>1.4.1-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.4.1-SNAPSHOT</version>
+ <relativePath>../../commons/opendaylight</relativePath>
+ </parent>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
+ <tag>HEAD</tag>
+ </scm>
- <artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- <packaging>bundle</packaging>
+ <artifactId>forwarding.staticrouting</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ <packaging>bundle</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Import-Package>
- org.opendaylight.controller.sal.utils,
- org.opendaylight.controller.sal.core,
- org.opendaylight.controller.configuration,
- org.opendaylight.controller.forwardingrulesmanager,
- org.opendaylight.controller.hosttracker,
- org.opendaylight.controller.hosttracker.hostAware,
- org.opendaylight.controller.clustering.services,
- org.opendaylight.controller.sal.packet,
- org.opendaylight.controller.sal.routing,
- org.opendaylight.controller.topologymanager,
- org.eclipse.osgi.framework.console,
- org.osgi.framework,
- org.slf4j,
- org.apache.felix.dm,
- org.apache.commons.lang3.builder
- </Import-Package>
- <Export-Package>
- org.opendaylight.controller.forwarding.staticrouting
- </Export-Package>
- <Bundle-Activator>
- org.opendaylight.controller.forwarding.staticrouting.internal.Activator
- </Bundle-Activator>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topologymanager</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwardingrulesmanager</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>configuration</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>0.5.1-SNAPSHOT</version>
- </dependency>
- </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Import-Package>
+ org.opendaylight.controller.sal.packet.address,
+ org.opendaylight.controller.sal.utils,
+ org.opendaylight.controller.sal.core,
+ org.opendaylight.controller.configuration,
+ org.opendaylight.controller.forwardingrulesmanager,
+ org.opendaylight.controller.hosttracker,
+ org.opendaylight.controller.hosttracker.hostAware,
+ org.opendaylight.controller.clustering.services,
+ org.opendaylight.controller.sal.packet,
+ org.opendaylight.controller.sal.routing,
+ org.opendaylight.controller.topologymanager,
+ org.eclipse.osgi.framework.console,
+ org.osgi.framework,
+ org.slf4j,
+ org.apache.felix.dm,
+ org.apache.commons.lang3.builder
+ </Import-Package>
+ <Export-Package>
+ org.opendaylight.controller.forwarding.staticrouting
+ </Export-Package>
+ <Bundle-Activator>
+ org.opendaylight.controller.forwarding.staticrouting.internal.Activator
+ </Bundle-Activator>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>topologymanager</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>forwardingrulesmanager</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>hosttracker</artifactId>
+ <version>${hosttracker.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>configuration</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ </dependency>
+ </dependencies>
</project>
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.forwarding.staticrouting.IStaticRoutingAware;
import org.opendaylight.controller.forwarding.staticrouting.StaticRoute;
import org.opendaylight.controller.forwarding.staticrouting.StaticRouteConfig;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.IfNewHostNotify;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
/**
* Static Routing feature provides the bridge between SDN and Non-SDN networks.
*/
-public class StaticRoutingImplementation implements IfNewHostNotify,
- IForwardingStaticRouting, IObjectReader, IConfigurationContainerAware {
- private static Logger log = LoggerFactory
- .getLogger(StaticRoutingImplementation.class);
+public class StaticRoutingImplementation implements IfNewHostNotify, IForwardingStaticRouting, IObjectReader,
+ IConfigurationContainerAware {
+ private static Logger log = LoggerFactory.getLogger(StaticRoutingImplementation.class);
private static String ROOT = GlobalConstants.STARTUPHOME.toString();
ConcurrentMap<String, StaticRoute> staticRoutes;
ConcurrentMap<String, StaticRouteConfig> staticRouteConfigs;
}
@Override
- public Object readObject(ObjectInputStream ois)
- throws FileNotFoundException, IOException, ClassNotFoundException {
+ public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
// Perform the class deserialization locally, from inside the package
// where the class is defined
return ois.readObject();
@SuppressWarnings("unchecked")
private void loadConfiguration() {
ObjectReader objReader = new ObjectReader();
- ConcurrentMap<String, StaticRouteConfig> confList = (ConcurrentMap<String, StaticRouteConfig>) objReader
- .read(this, staticRoutesFileName);
+ ConcurrentMap<String, StaticRouteConfig> confList = (ConcurrentMap<String, StaticRouteConfig>) objReader.read(
+ this, staticRoutesFileName);
if (confList == null) {
return;
}
}
-
private Status saveConfig() {
return saveConfigInternal();
}
Status status;
ObjectWriter objWriter = new ObjectWriter();
- status = objWriter.write(
- new ConcurrentHashMap<String, StaticRouteConfig>(
- staticRouteConfigs), staticRoutesFileName);
+ status = objWriter.write(new ConcurrentHashMap<String, StaticRouteConfig>(staticRouteConfigs),
+ staticRoutesFileName);
if (status.isSuccess()) {
return status;
}
@SuppressWarnings("deprecation")
- private void allocateCaches() {
+ private void allocateCaches() {
if (this.clusterContainerService == null) {
- log
- .info("un-initialized clusterContainerService, can't create cache");
+ log.info("un-initialized clusterContainerService, can't create cache");
return;
}
try {
- clusterContainerService.createCache(
- "forwarding.staticrouting.routes", EnumSet
- .of(IClusterServices.cacheMode.TRANSACTIONAL));
- clusterContainerService.createCache(
- "forwarding.staticrouting.configs", EnumSet
- .of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("forwarding.staticrouting.routes",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("forwarding.staticrouting.configs",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
} catch (CacheExistException cee) {
- log
- .error("\nCache already exists - destroy and recreate if needed");
+ log.error("\nCache already exists - destroy and recreate if needed");
} catch (CacheConfigException cce) {
log.error("\nCache configuration invalid - check cache mode");
}
@SuppressWarnings({ "unchecked", "deprecation" })
private void retrieveCaches() {
if (this.clusterContainerService == null) {
- log
- .info("un-initialized clusterContainerService, can't retrieve cache");
+ log.info("un-initialized clusterContainerService, can't retrieve cache");
return;
}
try {
ra.staticRouteUpdate(s, update);
} catch (Exception e) {
- log.error("",e);
+ log.error("", e);
}
}
}
@Override
public Object call() throws Exception {
- if (!added
- || (staticRoute.getType() == StaticRoute.NextHopType.SWITCHPORT)) {
+ if (!added || (staticRoute.getType() == StaticRoute.NextHopType.SWITCHPORT)) {
notifyStaticRouteUpdate(staticRoute, added);
} else {
InetAddress nh = staticRoute.getNextHopAddress();
- HostNodeConnector host = hostTracker.hostQuery(nh);
+ // HostTracker hosts db key scheme implementation
+ IHostId id = HostIdFactory.create(nh, null);
+ HostNodeConnector host = hostTracker.hostQuery(id);
if (host == null) {
log.debug("Next hop {} is not present, try to discover it", nh.getHostAddress());
- Future<HostNodeConnector> future = hostTracker.discoverHost(nh);
+ Future<HostNodeConnector> future = hostTracker.discoverHost(id);
if (future != null) {
try {
host = future.get();
public StaticRoute getBestMatchStaticRoute(InetAddress ipAddress) {
ByteBuffer bblongestPrefix = null;
try {
- bblongestPrefix = ByteBuffer.wrap(InetAddress.getByName("0.0.0.0")
- .getAddress());
+ bblongestPrefix = ByteBuffer.wrap(InetAddress.getByName("0.0.0.0").getAddress());
} catch (Exception e) {
return null;
}
return status;
}
if (staticRouteConfigs.get(config.getName()) != null) {
- return new Status(StatusCode.CONFLICT,
- "A valid Static Route configuration with this name " +
- "already exists. Please use a different name");
+ return new Status(StatusCode.CONFLICT, "A valid Static Route configuration with this name "
+ + "already exists. Please use a different name");
}
// Update database
for (Map.Entry<String, StaticRoute> entry : staticRoutes.entrySet()) {
if (entry.getValue().compareTo(sRoute) == 0) {
- return new Status(StatusCode.CONFLICT,
- "This conflicts with an existing Static Route " +
- "Configuration. Please check the configuration " +
- "and try again");
+ return new Status(StatusCode.CONFLICT, "This conflicts with an existing Static Route "
+ + "Configuration. Please check the configuration " + "and try again");
}
}
staticRoutes.put(config.getName(), sRoute);
checkAndUpdateListeners(name, sRoute, false);
return new Status(StatusCode.SUCCESS, null);
}
- return new Status(StatusCode.NOTFOUND,
- "Static Route with name " + name + " is not found");
+ return new Status(StatusCode.NOTFOUND, "Static Route with name " + name + " is not found");
}
void setClusterContainerService(IClusterContainerServices s) {
containerName = "";
}
- staticRoutesFileName = ROOT + "staticRouting_" + containerName
- + ".conf";
+ staticRoutesFileName = ROOT + "staticRouting_" + containerName + ".conf";
- log.debug("forwarding.staticrouting starting on container {}",
- containerName);
+ log.debug("forwarding.staticrouting starting on container {}", containerName);
allocateCaches();
retrieveCaches();
this.executor = Executors.newFixedThreadPool(1);
}
/*
- * Slow probe to identify any gateway that might have silently appeared
- * after the Static Routing Configuration.
+ * Slow probe to identify any gateway that might have silently appeared
+ * after the Static Routing Configuration.
*/
gatewayProbeTimer = new Timer();
gatewayProbeTimer.schedule(new TimerTask() {
public void run() {
for (Map.Entry<String, StaticRoute> s : staticRoutes.entrySet()) {
StaticRoute route = s.getValue();
- if ((route.getType() == StaticRoute.NextHopType.IPADDRESS)
- && route.getHost() == null) {
+ if ((route.getType() == StaticRoute.NextHopType.IPADDRESS) && route.getHost() == null) {
checkAndUpdateListeners(s.getKey(), route, true);
}
}
}
}, 60 * 1000, 60 * 1000);
+
}
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
*
*/
void destroy() {
- log.debug("Destroy all the Static Routing Rules given we are "
- + "shutting down");
+ log.debug("Destroy all the Static Routing Rules given we are " + "shutting down");
gatewayProbeTimer.cancel();
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
*
*/
void start() {
}
/**
- * Function called by the dependency manager before the services
- * exported by the component are unregistered, this will be
- * followed by a "destroy ()" calls
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
*
*/
void stop() {
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
- <version>1.4.1-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
- <artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- <packaging>bundle</packaging>
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.4.1-SNAPSHOT</version>
+ <relativePath>../../commons/opendaylight</relativePath>
+ </parent>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
+ <tag>HEAD</tag>
+ </scm>
+ <artifactId>hosttracker</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ <packaging>bundle</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Export-Package>
- org.opendaylight.controller.hosttracker,
- org.opendaylight.controller.hosttracker.hostAware
- </Export-Package>
- <Import-Package>
- org.opendaylight.controller.sal.core,
- org.opendaylight.controller.sal.utils,
- org.opendaylight.controller.topologymanager,
- org.opendaylight.controller.sal.packet.address,
- org.opendaylight.controller.switchmanager,
- org.opendaylight.controller.clustering.services,
- javax.xml.bind.annotation,
- javax.xml.bind,
- org.apache.felix.dm,
- org.apache.commons.lang3.builder,
- org.osgi.service.component,
- org.slf4j,
- org.eclipse.osgi.framework.console,
- org.osgi.framework
- </Import-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topologymanager</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>0.5.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- </dependency>
- </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>
+ org.opendaylight.controller.hosttracker,
+ org.opendaylight.controller.hosttracker.hostAware
+ </Export-Package>
+ <Import-Package>
+ org.opendaylight.controller.sal.core,
+ org.opendaylight.controller.sal.utils,
+ org.opendaylight.controller.topologymanager,
+ org.opendaylight.controller.sal.packet.address,
+ org.opendaylight.controller.switchmanager,
+ org.opendaylight.controller.clustering.services,
+ javax.xml.bind.annotation,
+ javax.xml.bind,
+ org.apache.felix.dm,
+ org.apache.commons.lang3.builder,
+ org.osgi.service.component,
+ org.slf4j,
+ org.eclipse.osgi.framework.console,
+ org.osgi.framework
+ </Import-Package>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>topologymanager</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>switchmanager</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>clustering.services</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ </dependencies>
</project>
+
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.net.InetAddress;
+
+import org.opendaylight.controller.sal.packet.address.DataLinkAddress;
+
+/**
+ * Factory that builds an IHostId key according to the key scheme chosen for
+ * hostsdb storage in hosttracker. The scheme is read once, at class load,
+ * from the "hosttracker.keyscheme" system property: "IP" (the default, also
+ * used for a missing or unrecognized value) keys hosts by IP address only;
+ * "IP+MAC" keys hosts by the IP/MAC pair.
+ *
+ * @author Deepak Udapudi
+ */
+public class HostIdFactory {
+    public static final String DEFAULT_IP_KEY_SCHEME = "IP";
+    public static final String IP_MAC_KEY_SCHEME = "IP+MAC";
+    // Read once at class load; null when the property is not set.
+    private static String scheme = null;
+    static {
+        scheme = System.getProperty("hosttracker.keyscheme");
+    }
+
+    /** @return the configured key scheme, or null when the property is unset. */
+    public static String getScheme() {
+        return scheme;
+    }
+
+    /**
+     * Creates a host key for the given addresses according to the configured
+     * scheme.
+     *
+     * @param ip the host's IP address
+     * @param mac the host's data-link address; used only by the "IP+MAC" scheme
+     * @return an IPMacHostId when the scheme is "IP+MAC", an IPHostId otherwise
+     */
+    public static IHostId create(InetAddress ip, DataLinkAddress mac) {
+        // Only the IP+MAC scheme needs the MAC; every other value (including
+        // null or an unrecognized scheme) falls back to the IP-only key.
+        // Constant-first equals() is null-safe against an unset scheme.
+        if (IP_MAC_KEY_SCHEME.equals(scheme)) {
+            return new IPMacHostId(ip, mac);
+        }
+        return new IPHostId(ip);
+    }
+
+}
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.io.Serializable;
+
+/**
+ * Marker interface implemented by the host-key classes (IP-only and IP+MAC
+ * keys) used to index hosts in the hosttracker database. Extends
+ * Serializable — presumably so keys can be replicated through the clustered
+ * caches that use them (TODO confirm against cache configuration).
+ *
+ * @author Deepak Udapudi
+ */
+
+public interface IHostId extends Serializable {
+
+}
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.io.Serializable;
+import java.net.InetAddress;
+
+/*
+ * IP-only key class implementation using the marker interface IHostId.
+ * Equality and hashing are based solely on the host's IP address.
+ *
+ * NOTE(review): ipAddress is mutable via its setter; mutating an instance
+ * that is already in use as a hash-map key would corrupt the map — confirm
+ * callers never mutate a published key.
+ *
+ * @author Deepak Udapudi
+ */
+
+public class IPHostId implements IHostId, Serializable {
+    private static final long serialVersionUID = 1L;
+    // Sole component of this key; drives hashCode()/equals().
+    private InetAddress ipAddress;
+
+    public InetAddress getIpAddress() {
+        return ipAddress;
+    }
+
+    public void setIpAddress(InetAddress ipAddress) {
+        this.ipAddress = ipAddress;
+    }
+
+    public IPHostId(InetAddress ipAddress) {
+        super();
+        this.ipAddress = ipAddress;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((ipAddress == null) ? 0 : ipAddress.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        IPHostId other = (IPHostId) obj;
+        if (ipAddress == null) {
+            if (other.ipAddress != null)
+                return false;
+        } else if (!ipAddress.equals(other.ipAddress))
+            return false;
+        return true;
+    }
+
+    /** Static factory: builds an IP-only key from the given address. */
+    public static IHostId fromIP(InetAddress addr) {
+        return new IPHostId(addr);
+    }
+
+}
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.io.Serializable;
+import java.net.InetAddress;
+
+/*
+ * IP + MAC key class implementation using the marker interface IHostId.
+ * Equality and hashing are based on the (ipAddress, macAddr) pair.
+ *
+ * NOTE(review): both fields are mutable via setters; mutating an instance
+ * that is already in use as a hash-map key would corrupt the map — confirm
+ * callers never mutate a published key.
+ *
+ * @author Deepak Udapudi
+ */
+
+import org.opendaylight.controller.sal.packet.address.DataLinkAddress;
+
+public class IPMacHostId implements IHostId, Serializable {
+
+    private static final long serialVersionUID = 1L;
+    // Both components drive hashCode()/equals().
+    private InetAddress ipAddress;
+    private DataLinkAddress macAddr;
+
+    public IPMacHostId(InetAddress ipAddress, DataLinkAddress macAddr) {
+        super();
+        this.ipAddress = ipAddress;
+        this.macAddr = macAddr;
+    }
+
+    public InetAddress getIpAddress() {
+        return ipAddress;
+    }
+
+    public void setIpAddress(InetAddress ipAddress) {
+        this.ipAddress = ipAddress;
+    }
+
+    public DataLinkAddress getMacAddr() {
+        return macAddr;
+    }
+
+    public void setMacAddr(DataLinkAddress macAddr) {
+        this.macAddr = macAddr;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((ipAddress == null) ? 0 : ipAddress.hashCode());
+        result = prime * result + ((macAddr == null) ? 0 : macAddr.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        IPMacHostId other = (IPMacHostId) obj;
+        if (ipAddress == null) {
+            if (other.ipAddress != null)
+                return false;
+        } else if (!ipAddress.equals(other.ipAddress))
+            return false;
+        if (macAddr == null) {
+            if (other.macAddr != null)
+                return false;
+        } else if (!macAddr.equals(other.macAddr))
+            return false;
+        return true;
+    }
+
+    /** Static factory: builds an IP+MAC key from the given address pair. */
+    public static IHostId fromIPAndMac(InetAddress ip, DataLinkAddress mac) {
+        return new IPMacHostId(ip, mac);
+    }
+
+}
* statically through Northbound APIs. If a binding is unknown, then an ARP
* request is initiated immediately to discover the host.
*
- * @param networkAddress
- * IP Address of the Host encapsulated in class InetAddress
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
* @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
* Class that contains the Host info such as its MAC address, Switch
* ID, port, VLAN. If Host is not found, returns NULL
*/
- public HostNodeConnector hostFind(InetAddress networkAddress);
+ public HostNodeConnector hostFind(IHostId id);
+
+ /**
+ * Applications call this interface methods to determine IP address to MAC
+ * binding and its connectivity to an OpenFlow switch in term of Node, Port,
+ * and VLAN. These bindings are learned dynamically as well as can be added
+ * statically through Northbound APIs. If a binding is unknown, then an ARP
+ * request is initiated immediately to discover the host.
+ *
+ * @param addr
+ * IP address of the host
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
+ */
+ public HostNodeConnector hostFind(InetAddress addr);
/**
* Checks the local Host Database to see if a Host has been learned for a
- * given IP address.
+ * given IP address and Mac combination using the HostId.
*
- * @param networkAddress
- * IP Address of the Host encapsulated in class InetAddress
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
+ *
+ */
+ public HostNodeConnector hostQuery(IHostId id);
+
+ /**
+ * Checks the local Host Database to see if a Host has been learned for a
+ * given IP address and Mac combination using the HostId.
+ *
+ * @param addr
+ * IP address of the Host
* @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
* Class that contains the Host info such as its MAC address, Switch
* ID, port, VLAN. If Host is not found, returns NULL
*
*/
- public HostNodeConnector hostQuery(InetAddress networkAddress);
+ public HostNodeConnector hostQuery(InetAddress addr);
/**
- * Initiates an immediate discovery of the Host for a given IP address. This
+ * Initiates an immediate discovery of the Host for a given Host id. This
* provides for the calling applications to block on the host discovery.
*
- * @param networkAddress
- * IP address encapsulated in InetAddress class
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
+ * @return Future
+ * {@link org.opendaylight.controller.hosttracker.HostTrackerCallable}
+ */
+ public Future<HostNodeConnector> discoverHost(IHostId id);
+
+ /**
+ * Initiates an immediate discovery of the Host for a given Host id. This
+ * provides for the calling applications to block on the host discovery.
+ *
+ * @param addr
+ * IP address of the host
* @return Future
* {@link org.opendaylight.controller.hosttracker.HostTrackerCallable}
*/
- public Future<HostNodeConnector> discoverHost(InetAddress networkAddress);
+ public Future<HostNodeConnector> discoverHost(InetAddress addr);
/**
* Returns the Network Hierarchy for a given Host. This API is typically
* used by applications like Hadoop for Rack Awareness functionality.
*
- * @param IP
- * address of the Host encapsulated in InetAddress class
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
* @return List of String ArrayList containing the Hierarchies.
*/
- public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress);
+ public List<List<String>> getHostNetworkHierarchy(IHostId id);
+
+ /**
+ * Returns the Network Hierarchy for a given Host. This API is typically
+ * used by applications like Hadoop for Rack Awareness functionality.
+ *
+ * @param addr
+ * IP address of the host
+ * @return List of String ArrayList containing the Hierarchies.
+ */
+ public List<List<String>> getHostNetworkHierarchy(InetAddress addr);
/**
* Returns all the the Hosts either learned dynamically or added statically
* @return The status object as described in {@code Status} indicating the
* result of this action.
*/
- public Status addStaticHost(String networkAddress, String dataLayerAddress,
- NodeConnector nc, String vlan);
+ public Status addStaticHost(String networkAddress, String dataLayerAddress, NodeConnector nc, String vlan);
/**
* Allows the deletion of statically learned Host
* result of this action.
*/
public Status removeStaticHost(String networkAddress);
+
+ /**
+ * Allows the deletion of statically learned Host
+ *
+ * @param networkAddress
+ * @param macAddress
+ * @return The status object as described in {@code Status} indicating the
+ * result of this action.
+ */
+ public Status removeStaticHostUsingIPAndMac(String networkAddress, String macAddress);
}
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
+import org.opendaylight.controller.hosttracker.IPHostId;
+import org.opendaylight.controller.hosttracker.IPMacHostId;
import org.opendaylight.controller.hosttracker.IfHostListener;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.IfNewHostNotify;
* removed the database
*/
+/***
+ *
+ * HostTracker db key scheme implementation support. Support has been added for
+ * IP-only or IP + MAC schemes as of now. Users can select either scheme via
+ * the hosttracker.keyscheme attribute in the config.ini file. By default the
+ * IP-only key scheme is chosen. NOTE(review): HostIdFactory compares the
+ * property value against the strings "IP" and "IP+MAC" (not 0/1) — confirm
+ * the documented config.ini values match what the factory expects.
+ *
+ */
+
public class HostTracker implements IfIptoHost, IfHostListener, ISwitchManagerAware, IInventoryListener,
- ITopologyManagerAware, ICacheUpdateAware<InetAddress, HostNodeConnector>, CommandProvider {
+ ITopologyManagerAware, ICacheUpdateAware<IHostId, HostNodeConnector>, CommandProvider {
static final String ACTIVE_HOST_CACHE = "hosttracker.ActiveHosts";
static final String INACTIVE_HOST_CACHE = "hosttracker.InactiveHosts";
private static final Logger logger = LoggerFactory.getLogger(HostTracker.class);
protected final Set<IHostFinder> hostFinder = new CopyOnWriteArraySet<IHostFinder>();;
- protected ConcurrentMap<InetAddress, HostNodeConnector> hostsDB;
+ protected ConcurrentMap<IHostId, HostNodeConnector> hostsDB;
/*
* Following is a list of hosts which have been requested by NB APIs to be
* added, but either the switch or the port is not sup, so they will be
protected boolean stopping;
private static boolean hostRefresh = true;
private static int hostRetryCount = 5;
+ private String keyScheme = null;
+
private static class ARPPending {
- protected InetAddress hostIP;
+ protected IHostId hostId;
protected short sent_count;
protected HostTrackerCallable hostTrackerCallable;
- public InetAddress getHostIP() {
- return hostIP;
+ public IHostId getHostId() {
+ return hostId;
}
public short getSent_count() {
return hostTrackerCallable;
}
- public void setHostIP(InetAddress networkAddr) {
- this.hostIP = networkAddr;
+ public void setHostId(IHostId id) {
+ this.hostId = id;
}
public void setSent_count(short count) {
// This list contains the hosts for which ARP requests are being sent
// periodically
- ConcurrentMap<InetAddress, ARPPending> ARPPendingList;
+ ConcurrentMap<IHostId, ARPPending> ARPPendingList;
/*
* This list below contains the hosts which were initially in ARPPendingList
* above, but ARP response didn't come from there hosts after multiple
*
* We can't recover from condition 3 above
*/
- ConcurrentMap<InetAddress, ARPPending> failedARPReqList;
+ ConcurrentMap<IHostId, ARPPending> failedARPReqList;
public HostTracker() {
}
/* ARP Refresh Timer to go off every 5 seconds to implement ARP aging */
arpRefreshTimer = new Timer();
arpRefreshTimer.schedule(new ARPRefreshHandler(), 5000, 5000);
+ keyScheme = HostIdFactory.getScheme();
logger.debug("startUp: Caches created, timers started");
}
return;
}
logger.debug("Retrieving cache for HostTrackerAH");
- hostsDB = (ConcurrentMap<InetAddress, HostNodeConnector>) this.clusterContainerService
- .getCache(ACTIVE_HOST_CACHE);
+ hostsDB = (ConcurrentMap<IHostId, HostNodeConnector>) this.clusterContainerService.getCache(ACTIVE_HOST_CACHE);
if (hostsDB == null) {
logger.error("Cache couldn't be retrieved for HostTracker");
}
}
public void nonClusterObjectCreate() {
- hostsDB = new ConcurrentHashMap<InetAddress, HostNodeConnector>();
+ hostsDB = new ConcurrentHashMap<IHostId, HostNodeConnector>();
inactiveStaticHosts = new ConcurrentHashMap<NodeConnector, HostNodeConnector>();
- ARPPendingList = new ConcurrentHashMap<InetAddress, ARPPending>();
- failedARPReqList = new ConcurrentHashMap<InetAddress, ARPPending>();
+ ARPPendingList = new ConcurrentHashMap<IHostId, ARPPending>();
+ failedARPReqList = new ConcurrentHashMap<IHostId, ARPPending>();
}
public void shutDown() {
}
private boolean hostExists(HostNodeConnector host) {
- HostNodeConnector lhost = hostsDB.get(host.getNetworkAddress());
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ HostNodeConnector lhost = hostsDB.get(id);
return host.equals(lhost);
}
- private HostNodeConnector getHostFromOnActiveDB(InetAddress networkAddress) {
- return hostsDB.get(networkAddress);
+ private HostNodeConnector getHostFromOnActiveDB(IHostId id) {
+ return hostsDB.get(id);
}
- private Entry<NodeConnector, HostNodeConnector> getHostFromInactiveDB(InetAddress networkAddress) {
+ private Entry<NodeConnector, HostNodeConnector> getHostFromInactiveDB(IHostId id) {
for (Entry<NodeConnector, HostNodeConnector> entry : inactiveStaticHosts.entrySet()) {
- if (entry.getValue().equalsByIP(networkAddress)) {
- logger.debug("getHostFromInactiveDB(): Inactive Host found for IP:{} ", networkAddress.getHostAddress());
+ HostNodeConnector hnc = entry.getValue();
+ IHostId cmpId = HostIdFactory.create(hnc.getNetworkAddress(), hnc.getDataLayerAddress());
+ if (cmpId.equals(id)) {
+ logger.debug("getHostFromInactiveDB(): Inactive Host found for ID:{} ", decodeIPFromId(id));
return entry;
}
}
- logger.debug("getHostFromInactiveDB() Inactive Host Not found for IP: {}", networkAddress.getHostAddress());
+ logger.debug("getHostFromInactiveDB() Inactive Host Not found for ID: {}", decodeIPFromId(id));
return null;
}
- private void removeHostFromInactiveDB(InetAddress networkAddress) {
+ private void removeHostFromInactiveDB(IHostId id) {
NodeConnector nodeConnector = null;
for (Entry<NodeConnector, HostNodeConnector> entry : inactiveStaticHosts.entrySet()) {
- if (entry.getValue().equalsByIP(networkAddress)) {
+ HostNodeConnector hnc = entry.getValue();
+ IHostId cmpId = HostIdFactory.create(hnc.getNetworkAddress(), hnc.getDataLayerAddress());
+ if (cmpId.equals(id)) {
nodeConnector = entry.getKey();
break;
}
}
if (nodeConnector != null) {
inactiveStaticHosts.remove(nodeConnector);
- logger.debug("removeHostFromInactiveDB(): Host Removed for IP: {}", networkAddress.getHostAddress());
+ logger.debug("removeHostFromInactiveDB(): Host Removed for IP: {}", decodeIPFromId(id));
return;
}
- logger.debug("removeHostFromInactiveDB(): Host Not found for IP: {}", networkAddress.getHostAddress());
+ logger.debug("removeHostFromInactiveDB(): Host Not found for IP: {}", decodeIPFromId(id));
}
protected boolean hostMoved(HostNodeConnector host) {
- if (hostQuery(host.getNetworkAddress()) != null) {
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ if (hostQuery(id) != null) {
return true;
}
return false;
}
@Override
- public HostNodeConnector hostQuery(InetAddress networkAddress) {
- return hostsDB.get(networkAddress);
+ public HostNodeConnector hostQuery(IHostId id) {
+ return hostsDB.get(id);
}
@Override
- public Future<HostNodeConnector> discoverHost(InetAddress networkAddress) {
+ public Future<HostNodeConnector> discoverHost(IHostId id) {
if (executor == null) {
logger.debug("discoverHost: Null executor");
return null;
}
- Callable<HostNodeConnector> worker = new HostTrackerCallable(this, networkAddress);
+ Callable<HostNodeConnector> worker = new HostTrackerCallable(this, id);
Future<HostNodeConnector> submit = executor.submit(worker);
return submit;
}
@Override
- public HostNodeConnector hostFind(InetAddress networkAddress) {
+ public HostNodeConnector hostFind(IHostId id) {
/*
* Sometimes at boot with containers configured in the startup we hit
* this path (from TIF) when hostFinder has not been set yet Caller
return null;
}
- HostNodeConnector host = hostQuery(networkAddress);
+ HostNodeConnector host = hostQuery(id);
if (host != null) {
- logger.debug("hostFind(): Host found for IP: {}", networkAddress.getHostAddress());
+ logger.debug("hostFind(): Host found for IP: {}", id);
return host;
}
/* Add this host to ARPPending List for any potential retries */
- addToARPPendingList(networkAddress);
- logger.debug("hostFind(): Host Not Found for IP: {}, Inititated Host Discovery ...",
- networkAddress.getHostAddress());
+ addToARPPendingList(id);
+ logger.debug("hostFind(): Host Not Found for IP: {}, Inititated Host Discovery ...", id);
/* host is not found, initiate a discovery */
for (IHostFinder hf : hostFinder) {
- hf.find(networkAddress);
+ InetAddress addr = decodeIPFromId(id);
+ hf.find(addr);
}
return null;
}
@Override
public Set<HostNodeConnector> getActiveStaticHosts() {
Set<HostNodeConnector> list = new HashSet<HostNodeConnector>();
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.isStaticHost()) {
list.add(host);
return list;
}
- private void addToARPPendingList(InetAddress networkAddr) {
+ private void addToARPPendingList(IHostId id) {
ARPPending arphost = new ARPPending();
- arphost.setHostIP(networkAddr);
+ arphost.setHostId(id);
arphost.setSent_count((short) 1);
- ARPPendingList.put(networkAddr, arphost);
- logger.debug("Host Added to ARPPending List, IP: {}", networkAddr);
+ ARPPendingList.put(id, arphost);
+ logger.debug("Host Added to ARPPending List, IP: {}", decodeIPFromId(id));
+
}
- public void setCallableOnPendingARP(InetAddress networkAddr, HostTrackerCallable callable) {
+ public void setCallableOnPendingARP(IHostId id, HostTrackerCallable callable) {
ARPPending arphost;
- for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
- if (arphost.getHostIP().equals(networkAddr)) {
+ if (arphost.getHostId().equals(id)) {
arphost.setHostTrackerCallable(callable);
}
}
}
- private void processPendingARPReqs(InetAddress networkAddr) {
+ private void processPendingARPReqs(IHostId id) {
ARPPending arphost;
- if ((arphost = ARPPendingList.remove(networkAddr)) != null) {
+ if ((arphost = ARPPendingList.remove(id)) != null) {
// Remove the arphost from ARPPendingList as it has been learned now
- logger.debug("Host Removed from ARPPending List, IP: {}", networkAddr);
+ logger.debug("Host Removed from ARPPending List, IP: {}", id);
HostTrackerCallable htCallable = arphost.getHostTrackerCallable();
if (htCallable != null) {
htCallable.wakeup();
* It could have been a host from the FailedARPReqList
*/
- if (failedARPReqList.containsKey(networkAddr)) {
- failedARPReqList.remove(networkAddr);
- logger.debug("Host Removed from FailedARPReqList List, IP: {}", networkAddr);
+ if (failedARPReqList.containsKey(id)) {
+ failedARPReqList.remove(id);
+ logger.debug("Host Removed from FailedARPReqList List, IP: {}", decodeIPFromId(id));
}
}
// Learn a new Host
private void learnNewHost(HostNodeConnector host) {
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
host.initArpSendCountDown();
- HostNodeConnector rHost = hostsDB.putIfAbsent(host.getNetworkAddress(), host);
+ HostNodeConnector rHost = hostsDB.putIfAbsent(id, host);
if (rHost != null) {
// Another host is already learned for this IP address, replace it
- replaceHost(host.getNetworkAddress(), rHost, host);
+ replaceHost(id, rHost, host);
} else {
logger.debug("New Host Learned: MAC: {} IP: {}", HexEncode.bytesToHexString(host
.getDataLayerAddressBytes()), host.getNetworkAddress().getHostAddress());
}
}
- private void replaceHost(InetAddress networkAddr, HostNodeConnector removedHost, HostNodeConnector newHost) {
+ private void replaceHost(IHostId id, HostNodeConnector removedHost, HostNodeConnector newHost) {
// Ignore ARP messages from internal nodes
NodeConnector newHostNc = newHost.getnodeConnector();
boolean newHostIsInternal = topologyManager.isInternal(newHostNc);
newHost.initArpSendCountDown();
- if (hostsDB.replace(networkAddr, removedHost, newHost)) {
+ if (hostsDB.replace(id, removedHost, newHost)) {
logger.debug("Host move occurred: Old Host IP:{}, New Host IP: {}", removedHost.getNetworkAddress()
.getHostAddress(), newHost.getNetworkAddress().getHostAddress());
logger.debug("Old Host MAC: {}, New Host MAC: {}",
/*
* Host replacement has failed, do the recovery
*/
- hostsDB.put(networkAddr, newHost);
+ hostsDB.put(id, newHost);
logger.error("Host replacement failed. Overwrite the host. Repalced Host: {}, New Host: {}", removedHost,
newHost);
}
notifyHostLearnedOrRemoved(removedHost, false);
notifyHostLearnedOrRemoved(newHost, true);
if (!newHost.isStaticHost()) {
- processPendingARPReqs(networkAddr);
+ processPendingARPReqs(id);
}
}
// Remove known Host
- private void removeKnownHost(InetAddress key) {
+ private void removeKnownHost(IHostId key) {
HostNodeConnector host = hostsDB.get(key);
if (host != null) {
logger.debug("Removing Host: IP:{}", host.getNetworkAddress().getHostAddress());
hostsDB.remove(key);
} else {
- logger.error("removeKnownHost(): Host for IP address {} not found in hostsDB", key.getHostAddress());
+ logger.error("removeKnownHost(): Host for IP address {} not found in hostsDB", decodeIPFromId(key));
}
}
public void run() {
HostNodeConnector removedHost = null;
InetAddress networkAddr = host.getNetworkAddress();
-
+ IHostId id = HostIdFactory.create(networkAddr, host.getDataLayerAddress());
/* Check for Host Move case */
if (hostMoved(host)) {
/*
* location parameters with new information, and notify the
* applications listening to host move.
*/
- removedHost = hostsDB.get(networkAddr);
+
+ removedHost = hostsDB.get(id);
if (removedHost != null) {
- replaceHost(networkAddr, removedHost, host);
+ replaceHost(id, removedHost, host);
return;
} else {
logger.error("Host to be removed not found in hostsDB");
learnNewHost(host);
/* check if there is an outstanding request for this host */
- processPendingARPReqs(networkAddr);
+ processPendingARPReqs(id);
notifyHostLearnedOrRemoved(host, true);
}
}
logger.debug("Received for Host: IP {}, MAC {}, {}", host.getNetworkAddress().getHostAddress(),
HexEncode.bytesToHexString(host.getDataLayerAddressBytes()), host);
if (hostExists(host)) {
- HostNodeConnector existinghost = hostsDB.get(host.getNetworkAddress());
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ HostNodeConnector existinghost = hostsDB.get(id);
existinghost.initArpSendCountDown();
// Update the host
- hostsDB.put(host.getNetworkAddress(), existinghost);
+
+ hostsDB.put(id, existinghost);
+ logger.debug("hostListener returned without adding the host");
return;
}
new NotifyHostThread(host).start();
* Switch-Ids as String).
*/
@Override
- public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress) {
- HostNodeConnector host = hostQuery(hostAddress);
+ public List<List<String>> getHostNetworkHierarchy(IHostId id) {
+ HostNodeConnector host = hostQuery(id);
if (host == null) {
return null;
}
public void subnetNotify(Subnet sub, boolean add) {
logger.debug("Received subnet notification: {} add={}", sub, add);
if (add) {
- for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : failedARPReqList.entrySet()) {
ARPPending arphost;
arphost = entry.getValue();
if (hostFinder == null) {
logger.warn("ARPHandler Services are not available on subnet addition");
continue;
}
- logger.debug("Sending the ARP from FailedARPReqList fors IP: {}", arphost.getHostIP().getHostAddress());
+ logger.debug("Sending the ARP from FailedARPReqList fors IP: {}", decodeIPFromId(arphost.getHostId()));
for (IHostFinder hf : hostFinder) {
- hf.find(arphost.getHostIP());
+ hf.find(decodeIPFromId(arphost.getHostId()));
}
}
}
/* This routine runs every 4 seconds */
logger.trace("Number of Entries in ARP Pending/Failed Lists: ARPPendingList = {}, failedARPReqList = {}",
ARPPendingList.size(), failedARPReqList.size());
- for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
- if (hostsDB.containsKey(arphost.getHostIP())) {
+ if (hostsDB.containsKey(arphost.getHostId())) {
// this host is already learned, shouldn't be in
// ARPPendingList
// Remove it and continue
- logger.warn("Learned Host {} found in ARPPendingList", arphost.getHostIP());
+ logger.warn("Learned Host {} found in ARPPendingList", decodeIPFromId(arphost.getHostId()));
ARPPendingList.remove(entry.getKey());
continue;
}
continue;
}
for (IHostFinder hf : hostFinder) {
- hf.find(arphost.getHostIP());
+ hf.find(decodeIPFromId(arphost.getHostId()));
}
arphost.sent_count++;
- logger.debug("ARP Sent from ARPPending List, IP: {}", arphost.getHostIP().getHostAddress());
+ logger.debug("ARP Sent from ARPPending List, IP: {}", decodeIPFromId(arphost.getHostId()));
} else if (arphost.getSent_count() >= hostRetryCount) {
/*
* ARP requests have been sent without receiving a reply,
*/
ARPPendingList.remove(entry.getKey());
logger.debug("ARP reply not received after multiple attempts, removing from Pending List IP: {}",
- arphost.getHostIP().getHostAddress());
+ decodeIPFromId(arphost.getHostId()));
/*
* Add this host to a different list which will be processed
* on link up events
*/
- logger.debug("Adding the host to FailedARPReqList IP: {}", arphost.getHostIP().getHostAddress());
+ logger.debug("Adding the host to FailedARPReqList IP: {}", decodeIPFromId(arphost.getHostId()));
failedARPReqList.put(entry.getKey(), arphost);
} else {
logger.error("ARPRefreshHandler(): hostsDB is not allocated yet:");
return;
}
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.isStaticHost()) {
/* this host was learned via API3, don't age it out */
HostNodeConnector host = null;
try {
host = new HostNodeConnector(dataLayerAddress, networkAddr, nc, vlan);
+ IHostId id = HostIdFactory.create(networkAddr, new EthernetAddress(dataLayerAddress));
if (hostExists(host)) {
// This host is already learned either via ARP or through a
// northbound request
return new Status(StatusCode.SUCCESS);
}
- if (hostsDB.get(networkAddr) != null) {
+ if (hostsDB.get(id) != null) {
// There is already a host with this IP address (but behind
// a different (switch, port, vlan) tuple. Return an error
return new Status(StatusCode.CONFLICT, "Host with this IP already exists.");
*/
if (switchManager.isNodeConnectorEnabled(nc)) {
learnNewHost(host);
- processPendingARPReqs(networkAddr);
+ processPendingARPReqs(id);
notifyHostLearnedOrRemoved(host, true);
} else {
inactiveStaticHosts.put(nc, host);
return new Status(StatusCode.BADREQUEST, "Host already exists");
}
+ IHostId id = HostIdFactory.create(networkAddr, new EthernetAddress(dataLayerAddress));
+
if ((tobeUpdatedHost = hostsDB.get(networkAddr)) != null) {
- if (hostsDB.replace(networkAddr, tobeUpdatedHost, host)) {
+ if (hostsDB.replace(id, tobeUpdatedHost, host)) {
logger.debug("Host replaced from hostsDB. Old host: {} New Host: {}", tobeUpdatedHost, host);
notifyHostLearnedOrRemoved(tobeUpdatedHost, false);
notifyHostLearnedOrRemoved(host, true);
* otherwise
*/
- public Status removeStaticHostReq(InetAddress networkAddress) {
+ public Status removeStaticHostReq(InetAddress networkAddress, DataLinkAddress mac) {
// Check if host is in active hosts database
- HostNodeConnector host = getHostFromOnActiveDB(networkAddress);
+ IHostId id = HostIdFactory.create(networkAddress, mac);
+ HostNodeConnector host = getHostFromOnActiveDB(id);
if (host != null) {
// Validation check
if (!host.isStaticHost()) {
}
// Remove and notify
notifyHostLearnedOrRemoved(host, false);
- removeKnownHost(networkAddress);
+ removeKnownHost(id);
return new Status(StatusCode.SUCCESS, null);
}
// Check if host is in inactive hosts database
- Entry<NodeConnector, HostNodeConnector> entry = getHostFromInactiveDB(networkAddress);
+ Entry<NodeConnector, HostNodeConnector> entry = getHostFromInactiveDB(id);
if (entry != null) {
host = entry.getValue();
// Validation check
if (!host.isStaticHost()) {
return new Status(StatusCode.FORBIDDEN, "Host " + networkAddress.getHostName() + " is not static");
}
- this.removeHostFromInactiveDB(networkAddress);
+ this.removeHostFromInactiveDB(id);
return new Status(StatusCode.SUCCESS, null);
}
switch (type) {
case REMOVED:
logger.debug("Received removed node {}", node);
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.getnodeconnectorNode().equals(node)) {
logger.debug("Node: {} is down, remove from Hosts_DB", node);
@Override
public Status removeStaticHost(String networkAddress) {
try {
+ // Legacy IP-only removal entry point: only permitted when the host DB
+ // key scheme is the default IP-only scheme; otherwise the MAC is part of
+ // the key and removeStaticHostUsingIPAndMac must be used instead.
+ if ((keyScheme != null) && (!keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ return new Status(StatusCode.NOTALLOWED, "Host DB Key scheme used is not IP only scheme.");
+ }
InetAddress address = InetAddress.getByName(networkAddress);
- return removeStaticHostReq(address);
+ // null MAC: under the IP-only scheme the id is derived from the IP alone.
+ return removeStaticHostReq(address, null);
} catch (UnknownHostException e) {
logger.debug("Invalid IP Address when trying to remove host", e);
return new Status(StatusCode.BADREQUEST, "Invalid IP Address when trying to remove host");
}
}
+ /**
+ * Removes a statically configured host identified by both IP and MAC.
+ * Only valid when the host DB key scheme includes the MAC, i.e. NOT the
+ * default IP-only scheme (the IP-only path is removeStaticHost(String)).
+ */
+ @Override
+ public Status removeStaticHostUsingIPAndMac(String networkAddress, String macAddress) {
+ try {
+ if ((keyScheme != null) && (keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ // Guard fires when the scheme IS IP-only; the old message said the
+ // opposite ("is not IP only scheme"), copy-pasted from removeStaticHost.
+ return new Status(StatusCode.NOTALLOWED, "Host DB Key scheme used is IP only scheme.");
+ }
+ InetAddress address = InetAddress.getByName(networkAddress);
+ DataLinkAddress mac = new EthernetAddress(HexEncode.bytesFromHexString(macAddress));
+ return removeStaticHostReq(address, mac);
+ } catch (UnknownHostException e) {
+ logger.debug("Invalid IP Address when trying to remove host", e);
+ return new Status(StatusCode.BADREQUEST, "Invalid IP Address when trying to remove host");
+ } catch (ConstructionException e) {
+ // Log through the logger instead of printStackTrace()/TODO stub.
+ logger.debug("Invalid MAC Address when trying to remove host", e);
+ return new Status(StatusCode.BADREQUEST, "Invalid Input parameters have been passed.");
+ }
+ }
+
+ /**
+ * Extracts the IP address carried by a host id under the currently
+ * configured key scheme (IP-only or IP+MAC). Returns null when keyScheme
+ * is null or unrecognized.
+ */
+ private InetAddress decodeIPFromId(IHostId id) {
+ if ((keyScheme != null) && (keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ IPHostId ipId = (IPHostId) id;
+ return (ipId.getIpAddress());
+ } else if ((keyScheme != null) && (keyScheme.equals(HostIdFactory.IP_MAC_KEY_SCHEME))) {
+ IPMacHostId ipMacId = (IPMacHostId) id;
+ return (ipMacId.getIpAddress());
+ }
+ return null;
+ }
+
+ /**
+ * Extracts the MAC address from a host id. Returns null under the IP-only
+ * scheme (ids then carry no MAC) or when keyScheme is null.
+ * NOTE(review): assumes every non-IP-only id is an IPMacHostId — the cast
+ * would throw ClassCastException otherwise; confirm no other schemes exist.
+ */
+ private DataLinkAddress decodeMacFromId(IHostId id) {
+ if ((keyScheme != null) && (!keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ IPMacHostId ipMacId = (IPMacHostId) id;
+ return (ipMacId.getMacAddr());
+ }
+
+ return null;
+ }
+
private void handleNodeConnectorStatusUp(NodeConnector nodeConnector) {
ARPPending arphost;
HostNodeConnector host = null;
logger.trace("handleNodeConnectorStatusUp {}", nodeConnector);
- for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : failedARPReqList.entrySet()) {
arphost = entry.getValue();
- logger.trace("Sending the ARP from FailedARPReqList fors IP: {}", arphost.getHostIP().getHostAddress());
+ logger.trace("Sending the ARP from FailedARPReqList fors IP: {}", arphost.getHostId());
if (hostFinder == null) {
logger.warn("ARPHandler is not available at interface up");
logger.warn("Since this event is missed, host(s) connected to interface {} may not be discovered",
// Use hostFinder's "probe" method
try {
byte[] dataLayerAddress = NetUtils.getBroadcastMACAddr();
- host = new HostNodeConnector(dataLayerAddress, arphost.getHostIP(), nodeConnector, (short) 0);
+ host = new HostNodeConnector(dataLayerAddress, decodeIPFromId(arphost.getHostId()), nodeConnector,
+ (short) 0);
for (IHostFinder hf : hostFinder) {
hf.probe(host);
}
} catch (ConstructionException e) {
logger.debug("HostNodeConnector couldn't be created for Host: {}, NodeConnector: {}",
- arphost.getHostIP(), nodeConnector);
+ arphost.getHostId(), nodeConnector);
logger.error("", e);
}
}
if (host != null) {
inactiveStaticHosts.remove(nodeConnector);
learnNewHost(host);
- processPendingARPReqs(host.getNetworkAddress());
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ processPendingARPReqs(id);
notifyHostLearnedOrRemoved(host, true);
}
}
private void handleNodeConnectorStatusDown(NodeConnector nodeConnector) {
logger.trace("handleNodeConnectorStatusDown {}", nodeConnector);
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.getnodeConnector().equals(nodeConnector)) {
logger.debug(" NodeConnector: {} is down, remove from Hosts_DB", nodeConnector);
this.containerName = "";
}
startUp();
+
+ logger.debug("key Scheme in hosttracker is {}", keyScheme);
}
/**
}
@Override
- public void entryCreated(InetAddress key, String cacheName, boolean originLocal) {
+ public void entryCreated(IHostId key, String cacheName, boolean originLocal) {
if (originLocal) {
return;
}
}
@Override
- public void entryUpdated(InetAddress key, HostNodeConnector new_value, String cacheName, boolean originLocal) {
+ public void entryUpdated(IHostId key, HostNodeConnector new_value, String cacheName, boolean originLocal) {
}
@Override
- public void entryDeleted(InetAddress key, String cacheName, boolean originLocal) {
+ public void entryDeleted(IHostId key, String cacheName, boolean originLocal) {
}
private void registerWithOSGIConsole() {
public void _dumpPendingARPReqList(CommandInterpreter ci) {
ARPPending arphost;
- for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
- ci.println(arphost.getHostIP().toString());
+ ci.println(arphost.getHostId().toString());
}
}
public void _dumpFailedARPReqList(CommandInterpreter ci) {
ARPPending arphost;
- for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : failedARPReqList.entrySet()) {
arphost = entry.getValue();
- ci.println(arphost.getHostIP().toString());
+ ci.println(arphost.getHostId().toString());
}
}
+
+ // Legacy IP-only lookup: wraps the address in an id with a null MAC and
+ // delegates to the IHostId overload.
+ // NOTE(review): under the IP+MAC scheme a null-MAC id may not match stored
+ // keys — confirm callers use this only with the IP-only scheme.
+ @Override
+ public HostNodeConnector hostFind(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return (hostFind(id));
+ }
+
+ // Legacy IP-only query: builds a null-MAC id and delegates to the
+ // IHostId-based hostQuery overload.
+ @Override
+ public HostNodeConnector hostQuery(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return (hostQuery(id));
+ }
+
+ // Legacy IP-only discovery: builds a null-MAC id and delegates to the
+ // IHostId-based discoverHost overload.
+ @Override
+ public Future<HostNodeConnector> discoverHost(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return discoverHost(id);
+ }
+
+ // Legacy IP-only variant: builds a null-MAC id and delegates to the
+ // IHostId-based getHostNetworkHierarchy overload.
+ @Override
+ public List<List<String>> getHostNetworkHierarchy(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return getHostNetworkHierarchy(id);
+ }
}
* find a host in HostTracker's database and want to discover the host
* in the same thread without being called by a callback function.
*/
-import java.net.InetAddress;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
+/**
+ *
+ *
+ */
public class HostTrackerCallable implements Callable<HostNodeConnector> {
- InetAddress trackedHost;
+ //host id which could be ip or a combination of ip + mac based on the scheme chosen.
+ IHostId trackedHost;
HostTracker hostTracker;
protected CountDownLatch latch;
- public HostTrackerCallable(HostTracker tracker, InetAddress inet) {
+ public HostTrackerCallable(HostTracker tracker, IHostId inet) {
trackedHost = inet;
hostTracker = tracker;
latch = new CountDownLatch(1);
\r
import java.net.InetAddress;\r
import java.net.UnknownHostException;\r
+\r
import junit.framework.TestCase;\r
\r
import org.junit.Assert;\r
import org.junit.Test;\r
+import org.opendaylight.controller.hosttracker.IHostId;\r
+import org.opendaylight.controller.hosttracker.IPHostId;\r
\r
public class HostTrackerTest extends TestCase {\r
\r
Assert.assertFalse(hostTracker == null);\r
\r
InetAddress hostIP = InetAddress.getByName("192.168.0.8");\r
+ IHostId id = IPHostId.fromIP(hostIP);\r
\r
HostTrackerCallable htCallable = new HostTrackerCallable(hostTracker,\r
- hostIP);\r
- Assert.assertTrue(htCallable.trackedHost.equals(hostIP));\r
+ id);\r
+ Assert.assertTrue(htCallable.trackedHost.equals(id));\r
Assert.assertTrue(htCallable.hostTracker.equals(hostTracker));\r
\r
long count = htCallable.latch.getCount();\r
Assert.assertFalse(hostTracker == null);\r
\r
InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
+ IHostId id1 = IPHostId.fromIP(hostIP_1);\r
InetAddress hostIP_2 = InetAddress.getByName("192.168.0.18");\r
- hostTracker.discoverHost(hostIP_1);\r
- hostTracker.discoverHost(hostIP_2);\r
+ IHostId id2 = IPHostId.fromIP(hostIP_2);\r
+ hostTracker.discoverHost(id1);\r
+ hostTracker.discoverHost(id2);\r
hostTracker.nonClusterObjectCreate();\r
}\r
\r
</scm>
<artifactId>hosttracker.integrationtest</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>arphandler</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${arphandler.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
+import org.opendaylight.controller.hosttracker.IHostId;
+import org.opendaylight.controller.hosttracker.IPHostId;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
import org.opendaylight.controller.sal.core.Node;
import org.osgi.framework.ServiceReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
//import org.opendaylight.controller.hosttracker.*;
@RunWith(PaxExam.class)
private IfIptoHost hosttracker = null;
private IInventoryListener invtoryListener = null;
+
// Configure the OSGi container
@Configuration
public Option[] config() {
st = this.hosttracker.addStaticHost("192.168.0.13", "11:22:33:44:55:77", nc1_2, "0");
Assert.assertFalse(st.isSuccess());
-
this.invtoryListener.notifyNodeConnector(nc1_1, UpdateType.ADDED, null);
// check all host list
Status st = this.hosttracker.addStaticHost("192.168.0.8", "11:22:33:44:55:66", nc1_1, null);
st = this.hosttracker.addStaticHost("192.168.0.13", "11:22:33:44:55:77", nc1_2, "");
- HostNodeConnector hnc_1 = this.hosttracker.hostFind(InetAddress.getByName("192.168.0.8"));
+ IHostId id1 = IPHostId.fromIP(InetAddress.getByName("192.168.0.8"));
+ HostNodeConnector hnc_1 = this.hosttracker.hostFind(id1);
assertNull(hnc_1);
this.invtoryListener.notifyNodeConnector(nc1_1, UpdateType.ADDED, null);
- hnc_1 = this.hosttracker.hostFind(InetAddress.getByName("192.168.0.8"));
+ IHostId id2 = IPHostId.fromIP(InetAddress.getByName("192.168.0.8"));
+ hnc_1 = this.hosttracker.hostFind(id2);
+
assertNotNull(hnc_1);
}
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.GroupAdded;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.GroupRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.GroupUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInputBuilder;
+
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupTypes.GroupType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.Buckets;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets.Bucket;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.config.rev131024.meters.Meter;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
return new Status(StatusCode.BADREQUEST, "Group Name is invalid");
}
- returnResult = doesGroupEntryExists(group.getKey(), groupName, containerName);
+ /* returnResult = doesGroupEntryExists(group.getKey(), groupName, containerName);
if (FRMUtil.operation.ADD == operation && returnResult) {
logger.error("Record with same Group Name exists");
} else if (!returnResult) {
logger.error("Group record does not exist");
return new Status(StatusCode.BADREQUEST, "Group record does not exist");
- }
+ }*/
if (!(group.getGroupType().getIntValue() >= GroupType.GroupAll.getIntValue() && group.getGroupType()
.getIntValue() <= GroupType.GroupFf.getIntValue())) {
}
- private boolean doesGroupEntryExists(GroupKey key, String groupName, String containerName) {
+ /* private boolean doesGroupEntryExists(GroupKey key, String groupName, String containerName) {
if (!originalSwGroupView.containsKey(key)) {
return false;
}
}
}
}
- return false;
- }
+ return true;
+ }*/
/**
* Update Group entries to the southbound plugin/inventory and our internal
return groupOperationStatus;
}
- if (originalSwGroupView.containsKey(groupKey)) {
+ /*if (originalSwGroupView.containsKey(groupKey)) {
originalSwGroupView.remove(groupKey);
originalSwGroupView.put(groupKey, groupUpdateDataObject);
}
-
+*/
if (groupUpdateDataObject.isInstall()) {
UpdateGroupInputBuilder groupData = new UpdateGroupInputBuilder();
updateGroupBuilder = new UpdatedGroupBuilder();
groupData.setUpdatedGroup(updateGroupBuilder.build());
// TODO how to get original group and modified group.
- if (installedSwGroupView.containsKey(groupKey)) {
+ /* if (installedSwGroupView.containsKey(groupKey)) {
installedSwGroupView.remove(groupKey);
installedSwGroupView.put(groupKey, groupUpdateDataObject);
- }
+ }*/
groupService.updateGroup(groupData.build());
}
return groupOperationStatus;
}
- originalSwGroupView.put(groupKey, groupAddDataObject);
+ //originalSwGroupView.put(groupKey, groupAddDataObject);
if (groupAddDataObject.isInstall()) {
AddGroupInputBuilder groupData = new AddGroupInputBuilder();
groupData.setGroupId(groupAddDataObject.getGroupId());
groupData.setGroupType(groupAddDataObject.getGroupType());
groupData.setNode(groupAddDataObject.getNode());
- installedSwGroupView.put(groupKey, groupAddDataObject);
+ // installedSwGroupView.put(groupKey, groupAddDataObject);
groupService.addGroup(groupData.build());
}
return groupOperationStatus;
}
- private RpcResult<Void> commitToPlugin(internalTransaction transaction) {
+ /**
+ * Remove Group from the southbound plugin and our internal database
+ *
+ * @param path instance identifier of the group being removed
+ * @param groupRemoveDataObject the group configuration to remove
+ * @return status of the validation / removal request
+ */
+ private Status removeGroup(InstanceIdentifier<?> path, Group groupRemoveDataObject) {
+ GroupKey groupKey = groupRemoveDataObject.getKey();
+ // Validate as DELETE, matching MeterConsumerImpl.removeMeter; the old code
+ + // copy-pasted operation.ADD from addGroup.
+ Status groupOperationStatus = validateGroup(groupRemoveDataObject, FRMUtil.operation.DELETE);
+
+ if (!groupOperationStatus.isSuccess()) {
+ // SLF4J parameterized logging; the old "%s" + concatenation was never formatted.
+ logger.error("Group data object validation failed: {}", groupRemoveDataObject.getGroupName());
+ return groupOperationStatus;
+ }
+
+ if (groupRemoveDataObject.isInstall()) {
+ RemoveGroupInputBuilder groupData = new RemoveGroupInputBuilder();
+ groupData.setBuckets(groupRemoveDataObject.getBuckets());
+ groupData.setContainerName(groupRemoveDataObject.getContainerName());
+ groupData.setGroupId(groupRemoveDataObject.getGroupId());
+ groupData.setGroupType(groupRemoveDataObject.getGroupType());
+ groupData.setNode(groupRemoveDataObject.getNode());
+ groupService.removeGroup(groupData.build());
+ }
+
+ return groupOperationStatus;
+ }
+
+ private RpcResult<Void> commitToPlugin(InternalTransaction transaction) {
for (Entry<InstanceIdentifier<?>, Group> entry : transaction.additions.entrySet()) {
if (!addGroup(entry.getKey(), entry.getValue()).isSuccess()) {
transaction.additions.remove(entry.getKey());
- return Rpcs.getRpcResult(false, null, null);
+ return Rpcs.getRpcResult(false, null, Collections.<RpcError>emptySet());
}
}
if (!updateGroup(entry.getKey(), entry.getValue()).isSuccess()) {
transaction.updates.remove(entry.getKey());
- return Rpcs.getRpcResult(false, null, null);
+ return Rpcs.getRpcResult(false, null, Collections.<RpcError>emptySet());
}
}
- for (InstanceIdentifier<?> removal : transaction.removals) {
- // removeFlow(removal);
+ for (InstanceIdentifier<?> groupId : transaction.removals) {
+ DataObject removeValue = transaction.getModification().getOriginalConfigurationData().get(groupId);
+
+ if(removeValue instanceof Group) {
+ if(!removeGroup(groupId, (Group)removeValue).isSuccess()) {
+ return Rpcs.getRpcResult(false, null, Collections.<RpcError>emptySet());
+ }
+ }
}
return Rpcs.getRpcResult(true, null, null);
private final class GroupDataCommitHandler implements DataCommitHandler<InstanceIdentifier<?>, DataObject> {
- @SuppressWarnings("unchecked")
@Override
public DataCommitTransaction<InstanceIdentifier<?>, DataObject> requestCommit(
DataModification<InstanceIdentifier<?>, DataObject> modification) {
// We should verify transaction
System.out.println("Coming in GroupDatacommitHandler");
- internalTransaction transaction = new internalTransaction(modification);
+ InternalTransaction transaction = new InternalTransaction(modification);
transaction.prepareUpdate();
return transaction;
}
}
- private final class internalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
+ private final class InternalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
private final DataModification<InstanceIdentifier<?>, DataObject> modification;
return modification;
}
- public internalTransaction(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+ public InternalTransaction(DataModification<InstanceIdentifier<?>, DataObject> modification) {
this.modification = modification;
}
*
*/
void prepareUpdate() {
-
- Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
- for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
+
+ // Created entries are additions.
+ Set<Entry<InstanceIdentifier<?>, DataObject>> groupAdded = modification.getCreatedConfigurationData().entrySet();
+ for (Entry<InstanceIdentifier<?>, DataObject> entry : groupAdded) {
+ if (entry.getValue() instanceof Group) {
+ Group group = (Group) entry.getValue();
+ additions.put(entry.getKey(), group);
+ }
+
+ }
+
+ Set<Entry<InstanceIdentifier<?>, DataObject>> groupUpdate = modification.getUpdatedConfigurationData().entrySet();
+ for (Entry<InstanceIdentifier<?>, DataObject> entry : groupUpdate) {
if (entry.getValue() instanceof Group) {
Group group = (Group) entry.getValue();
+ ///will be fixed once getUpdatedConfigurationData returns only updated data not created data with it.
+ // Only record as an update what was NOT just created; the original
+ // condition was inverted, and contradicted the identical filter in
+ // MeterConsumerImpl.InternalTransaction.prepareUpdate.
+ if (!additions.containsKey(entry.getKey())) {
+ updates.put(entry.getKey(), group);
+ }
}
}
removals = modification.getRemovedConfigurationData();
}
-
- private void preparePutEntry(InstanceIdentifier<?> key, Group group) {
-
- Group original = originalSwGroupView.get(key);
- if (original != null) {
- // It is update for us
-
- updates.put(key, group);
- } else {
- // It is addition for us
-
- additions.put(key, group);
- }
- }
-
+
/**
* We are OK to go with execution of plan
*
// NOOP - we did not modified any internal state during
// requestCommit phase
// return Rpcs.getRpcResult(true, null, Collections.emptySet());
- return Rpcs.getRpcResult(true, null, null);
+ return Rpcs.getRpcResult(true, null, Collections.<RpcError>emptySet());
}
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.MeterAdded;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.MeterRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.MeterUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterInputBuilder;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private MeterDataCommitHandler commitHandler;
private ConcurrentMap<MeterKey, Meter> originalSwMeterView;
+ @SuppressWarnings("unused")
private ConcurrentMap<MeterKey, Meter> installedSwMeterView;
-
+ @SuppressWarnings("unused")
private ConcurrentMap<Node, List<Meter>> nodeMeters;
+ @SuppressWarnings("unused")
private ConcurrentMap<MeterKey, Meter> inactiveMeters;
+ @SuppressWarnings("unused")
+ private IContainer container;
private IClusterContainerServices clusterMeterContainerService = null;
- private IContainer container;
+
public MeterConsumerImpl() {
InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder(Meters.class).child(Meter.class)
meterBuilder.setMeterBandHeaders(meterAddDataObject.getMeterBandHeaders());
meterBuilder.setMeterId(meterAddDataObject.getMeterId());
meterBuilder.setNode(meterAddDataObject.getNode());
- originalSwMeterView.put(meterKey, meterAddDataObject);
+ // originalSwMeterView.put(meterKey, meterAddDataObject);
meterService.addMeter(meterBuilder.build());
}
- originalSwMeterView.put(meterKey, meterAddDataObject);
+ // originalSwMeterView.put(meterKey, meterAddDataObject);
} else {
return new Status(StatusCode.BADREQUEST, "Meter Key or attribute validation failed");
}
if (null != meterKey && validateMeter(meterUpdateDataObject, FRMUtil.operation.UPDATE).isSuccess()) {
- if (originalSwMeterView.containsKey(meterKey)) {
+ /* if (originalSwMeterView.containsKey(meterKey)) {
originalSwMeterView.remove(meterKey);
originalSwMeterView.put(meterKey, meterUpdateDataObject);
- }
+ }*/
if (meterUpdateDataObject.isInstall()) {
UpdateMeterInputBuilder updateMeterInputBuilder = new UpdateMeterInputBuilder();
updateMeterBuilder.fieldsFrom(meterUpdateDataObject);
updateMeterInputBuilder.setUpdatedMeter(updateMeterBuilder.build());
- if (installedSwMeterView.containsKey(meterKey)) {
+ /* if (installedSwMeterView.containsKey(meterKey)) {
installedSwMeterView.remove(meterKey);
installedSwMeterView.put(meterKey, meterUpdateDataObject);
- }
+ }*/
meterService.updateMeter(updateMeterInputBuilder.build());
}
*
* @param dataObject
*/
- private Status RemoveMeter(InstanceIdentifier<?> path, Meter meterUpdateDataObject) {
- MeterKey meterKey = meterUpdateDataObject.getKey();
-
- if (null != meterKey && validateMeter(meterUpdateDataObject, FRMUtil.operation.ADD).isSuccess()) {
- if (meterUpdateDataObject.isInstall()) {
- UpdateMeterInputBuilder updateMeterBuilder = new UpdateMeterInputBuilder();
-
- installedSwMeterView.put(meterKey, meterUpdateDataObject);
- meterService.updateMeter(updateMeterBuilder.build());
+ private Status removeMeter(InstanceIdentifier<?> path, Meter meterRemoveDataObject) {
+ MeterKey meterKey = meterRemoveDataObject.getKey();
+
+ if (null != meterKey && validateMeter(meterRemoveDataObject, FRMUtil.operation.DELETE).isSuccess()) {
+ if (meterRemoveDataObject.isInstall()) {
+ RemoveMeterInputBuilder meterBuilder = new RemoveMeterInputBuilder();
+ meterBuilder.setContainerName(meterRemoveDataObject.getContainerName());
+ meterBuilder.setNode(meterRemoveDataObject.getNode());
+ meterBuilder.setFlags(meterRemoveDataObject.getFlags());
+ meterBuilder.setMeterBandHeaders(meterRemoveDataObject.getMeterBandHeaders());
+ meterBuilder.setMeterId(meterRemoveDataObject.getMeterId());
+ meterBuilder.setNode(meterRemoveDataObject.getNode());
+ // originalSwMeterView.put(meterKey, meterAddDataObject);
+ meterService.removeMeter(meterBuilder.build());
}
- originalSwMeterView.put(meterKey, meterUpdateDataObject);
+ // originalSwMeterView.put(meterKey, meterAddDataObject);
} else {
return new Status(StatusCode.BADREQUEST, "Meter Key or attribute validation failed");
}
String containerName;
String meterName;
Status returnStatus = null;
- boolean returnResult;
if (null != meter) {
containerName = meter.getContainerName();
return returnStatus;
}
- returnResult = doesMeterEntryExists(meter.getKey(), meterName, containerName);
+ /* returnResult = doesMeterEntryExists(meter.getKey(), meterName, containerName);
if (FRMUtil.operation.ADD == operation && returnResult) {
logger.error("Record with same Meter Name exists");
logger.error("Group record does not exist");
returnStatus = new Status(StatusCode.BADREQUEST, "Meter record does not exist");
return returnStatus;
- }
+ }*/
for (int i = 0; i < meter.getMeterBandHeaders().getMeterBandHeader().size(); i++) {
if (!meter.getFlags().isMeterBurst()) {
return new Status(StatusCode.SUCCESS);
}
- private boolean doesMeterEntryExists(MeterKey key, String meterName, String containerName) {
+ /*private boolean doesMeterEntryExists(MeterKey key, String meterName, String containerName) {
if (!originalSwMeterView.containsKey(key)) {
return false;
}
}
}
return false;
- }
-
- private RpcResult<Void> commitToPlugin(internalTransaction transaction) {
- for (Entry<InstanceIdentifier<?>, Meter> entry : transaction.additions.entrySet()) {
-
- if (!addMeter(entry.getKey(), entry.getValue()).isSuccess()) {
- return Rpcs.getRpcResult(false, null, null);
- }
- }
- for (@SuppressWarnings("unused")
- Entry<InstanceIdentifier<?>, Meter> entry : transaction.updates.entrySet()) {
-
- if (!updateMeter(entry.getKey(), entry.getValue()).isSuccess()) {
- return Rpcs.getRpcResult(false, null, null);
- }
- }
-
- for (InstanceIdentifier<?> removal : transaction.removals) {
- /*
- * if (!removeMeter(entry.getKey(),entry.getValue()).isSuccess()) {
- * return Rpcs.getRpcResult(false, null, null); }
- */
- }
-
- return Rpcs.getRpcResult(true, null, null);
- }
+ }*/
- private final class internalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
+ private final class InternalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
private final DataModification<InstanceIdentifier<?>, DataObject> modification;
return modification;
}
- public internalTransaction(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+ public InternalTransaction(DataModification<InstanceIdentifier<?>, DataObject> modification) {
this.modification = modification;
}
*
*/
void prepareUpdate() {
-
- Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
- for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
+
+ Set<Entry<InstanceIdentifier<?>, DataObject>> addMeter = modification.getCreatedConfigurationData().entrySet();
+ for (Entry<InstanceIdentifier<?>, DataObject> entry : addMeter) {
if (entry.getValue() instanceof Meter) {
- Meter Meter = (Meter) entry.getValue();
- preparePutEntry(entry.getKey(), Meter);
+ Meter meter = (Meter) entry.getValue();
+ additions.put(entry.getKey(), meter);
}
}
+
+ Set<Entry<InstanceIdentifier<?>, DataObject>> updateMeter = modification.getUpdatedConfigurationData().entrySet();
+ for (Entry<InstanceIdentifier<?>, DataObject> entry : updateMeter) {
+ if (entry.getValue() instanceof Meter) {
+ Meter meter = (Meter) entry.getValue();
+                    // FIXME: remove this guard once getUpdatedConfigurationData returns only updated data, not created data as well.
+ if (!additions.containsKey(entry.getKey())) {
+ updates.put(entry.getKey(), meter);
+ }
+ }
+ }
removals = modification.getRemovedConfigurationData();
}
- private void preparePutEntry(InstanceIdentifier<?> key, Meter meter) {
-
- Meter original = originalSwMeterView.get(key);
- if (original != null) {
- // It is update for us
-
- updates.put(key, meter);
- } else {
- // It is addition for us
-
- additions.put(key, meter);
- }
- }
-
/**
* We are OK to go with execution of plan
*
// NOOP - we did not modified any internal state during
// requestCommit phase
// return Rpcs.getRpcResult(true, null, Collections.emptySet());
- return Rpcs.getRpcResult(true, null, null);
+ return Rpcs.getRpcResult(true, null, Collections.<RpcError>emptySet());
+
+ }
+
+ }
+
+ private RpcResult<Void> commitToPlugin(InternalTransaction transaction) {
+ for (Entry<InstanceIdentifier<?>, Meter> entry : transaction.additions.entrySet()) {
+
+ if (!addMeter(entry.getKey(), entry.getValue()).isSuccess()) {
+ return Rpcs.getRpcResult(false, null, Collections.<RpcError>emptySet());
+ }
+ }
+ for (Entry<InstanceIdentifier<?>, Meter> entry : transaction.updates.entrySet()) {
+
+ if (!updateMeter(entry.getKey(), entry.getValue()).isSuccess()) {
+ return Rpcs.getRpcResult(false, null, Collections.<RpcError>emptySet());
+ }
+ }
+ for (InstanceIdentifier<?> meterId : transaction.removals) {
+ DataObject removeValue = transaction.getModification().getOriginalConfigurationData().get(meterId);
+
+ if(removeValue instanceof Meter) {
+ if(!removeMeter(meterId, (Meter)removeValue).isSuccess()) {
+ return Rpcs.getRpcResult(false, null, Collections.<RpcError>emptySet());
+ }
+ }
}
+ return Rpcs.getRpcResult(true, null, Collections.<RpcError>emptySet());
}
private final class MeterDataCommitHandler implements DataCommitHandler<InstanceIdentifier<?>, DataObject> {
DataModification<InstanceIdentifier<?>, DataObject> modification) {
// We should verify transaction
System.out.println("Coming in MeterDataCommitHandler");
- internalTransaction transaction = new internalTransaction(modification);
+ InternalTransaction transaction = new InternalTransaction(modification);
transaction.prepareUpdate();
return transaction;
}
import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
- import opendaylight-match-types {prefix match; revision-date 2013-10-26";}
+ import opendaylight-match-types {prefix match; revision-date "2013-10-26";}
import opendaylight-action-types {prefix action;}
revision "2013-10-26" {
--- /dev/null
+module opendaylight-queue-types {
+ namespace "urn:opendaylight:flow:types:queue";
+ prefix queue-types;
+
+ import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+
+ revision "2013-09-25" {
+ description "Initial revision of Queue Inventory model";
+ }
+
+ typedef queue-properties {
+ type enumeration {
+ enum min_rate;
+ enum max_rate;
+ }
+ }
+
+
+ grouping common-queue {
+
+ leaf property {
+ type uint16;
+ description "One of OFPQT_.";
+ }
+
+ }
+
+
+ grouping queue-prop-min-rate {
+
+ uses common-queue;
+
+ leaf rate {
+ type uint16;
+ description "OFPQT_MIN, len: 16";
+ }
+
+ }
+
+
+
+ grouping queue-prop-max-rate {
+
+ uses common-queue;
+
+ leaf rate {
+ type uint16;
+ description "OFPQT_MAX, len: 16";
+ }
+
+ }
+ grouping queue-packet {
+
+
+ leaf queue-id {
+ type uint32;
+ description "id for the specific queue.";
+ }
+
+ leaf port {
+ type uint32;
+ description "Port this queue is attached to.";
+ }
+ uses common-queue;
+ }
+
+ grouping queue-config-request
+ {
+ leaf port {
+ type uint32;
+ description "Port to be queried.";
+ }
+
+ }
+}
\ No newline at end of file
import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
- import opendaylight-flow-types {prefix flow;revision-date 2013-10-26";}
+ import opendaylight-flow-types {prefix flow;revision-date "2013-10-26";}
import opendaylight-action-types {prefix action;}
revision "2013-10-26" {
--- /dev/null
+module queue-management {
+ namespace "urn:opendaylight:queue:config";
+ prefix queue-cfg;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+
+ import opendaylight-queue-types {prefix queue; revision-date "2013-09-25";}
+
+
+ revision "2013-10-24" {
+ description "Initial revision of queue service";
+ }
+
+ grouping queue-entry {
+ leaf node {
+ type inv:node-connector-ref;
+
+ }
+ uses queue:queue-config-request;
+ }
+
+ container queues {
+ list queue {
+ key "id node";
+
+ leaf id {
+ type uint32;
+ }
+
+ uses queue-entry;
+ }
+ }
+}
\ No newline at end of file
rpc remove-group {
input {
- uses group-update;
+ uses node-group;
uses tr:transaction-aware;
}
output {
import yang-ext {prefix ext; revision-date "2013-07-09";}
import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
import opendaylight-port-types {prefix port-type;revision-date "2013-09-25";}
+ import flow-capable-transaction {prefix tr;}
revision "2013-11-07" {
description "Initial revision of port service";
rpc update-port {
input {
uses port-update;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
--- /dev/null
+module sal-queue {
+ namespace "urn:opendaylight:queue:service";
+ prefix queue;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv; revision-date "2013-08-19";}
+ import opendaylight-queue-types {prefix queue-type; revision-date "2013-09-25";}
+
+ revision "2013-11-07" {
+ description "Initial revision of queue service";
+ }
+
+ grouping node-queue {
+ uses "inv:node-context-ref";
+
+ uses queue-type:queue-packet;
+ }
+
+
+ rpc get-queue {
+ output {
+ uses queue-type:queue-packet;
+ }
+ }
+
+ notification queue-get-config-reply {
+ uses node-queue;
+ }
+}
\ No newline at end of file
target/generated-sources/sal
</outputBaseDir>
</generator>
+ <generator>
+ <codeGeneratorClass>org.opendaylight.yangtools.yang.unified.doc.generator.maven.DocumentationGeneratorImpl</codeGeneratorClass>
+ <outputBaseDir>target/site/restconf</outputBaseDir>
+ </generator>
</codeGenerators>
<inspectDependencies>true</inspectDependencies>
</configuration>
<!-- Connectors -->
<module>sal-connector-api</module>
<module>sal-rest-connector</module>
-
+ <module>sal-netconf-connector</module>
+
<!-- Clustered Data Store -->
<module>clustered-data-store/implementation</module>
import org.osgi.framework.BundleContext
import java.util.Hashtable
import org.osgi.framework.ServiceRegistration
+import org.opendaylight.controller.sal.binding.impl.connect.dom.DeserializationException
+import java.util.concurrent.Callable
class RuntimeGeneratedMappingServiceImpl implements BindingIndependentMappingService, SchemaServiceListener, AutoCloseable {
val promisedTypeDefinitions = HashMultimap.<Type, SettableFuture<GeneratedTypeBuilder>>create;
val promisedSchemas = HashMultimap.<Type, SettableFuture<SchemaNode>>create;
-
+
ServiceRegistration<SchemaServiceListener> listenerRegistration
override onGlobalContextUpdated(SchemaContext arg0) {
val context = entry.value;
updateBindingFor(context.childNodes, schemaContext);
updateBindingFor(context.cases, schemaContext);
-
val typedefs = context.typedefs;
for (typedef : typedefs.values) {
for (augmentation : augmentations) {
binding.typeToDefinition.put(augmentation, augmentation);
}
-
+
binding.typeToAugmentation.putAll(context.typeToAugmentation);
}
}
}
override dataObjectFromDataDom(InstanceIdentifier<? extends DataObject> path, CompositeNode node) {
- if (node == null) {
- return null;
- }
- val targetType = path.targetType
- val transformer = registry.getCodecForDataObject(targetType);
- val ret = transformer.deserialize(node)?.value as DataObject;
- return ret;
+ return tryDeserialization[ |
+ if (node == null) {
+ return null;
+ }
+ val targetType = path.targetType
+ val transformer = registry.getCodecForDataObject(targetType);
+ val ret = transformer.deserialize(node)?.value as DataObject;
+ return ret;
+ ]
}
-
+
override fromDataDom(org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry) {
- return registry.instanceIdentifierCodec.deserialize(entry);
+ return tryDeserialization[ |
+ registry.instanceIdentifierCodec.deserialize(entry);
+ ]
+ }
+
+ private static def <T> T tryDeserialization(Callable<T> deserializationBlock) throws DeserializationException {
+ try {
+ deserializationBlock.call()
+ } catch (Exception e) {
+ // FIXME: Make this block providing more information.
+ throw new DeserializationException(e);
+ }
}
private def void updateBindingFor(Map<SchemaPath, GeneratedTypeBuilder> map, SchemaContext module) {
for (entry : map.entrySet) {
val schemaNode = SchemaContextUtil.findDataSchemaNode(module, entry.key);
+
//LOG.info("{} : {}",entry.key,entry.value.fullyQualifiedName)
if (schemaNode != null) {
typeToSchemaNode.put(entry.value, schemaNode);
binding.typeToDefinition = typeToDefinition
binding.typeToSchemaNode = typeToSchemaNode
binding.typeDefinitions = typeDefinitions
- if(ctx !== null) {
- listenerRegistration = ctx.registerService(SchemaServiceListener,this,new Hashtable<String,String>());
+ if (ctx !== null) {
+ listenerRegistration = ctx.registerService(SchemaServiceListener, this, new Hashtable<String, String>());
}
}
}
promisedSchemas.removeAll(builder);
}
-
+
override close() throws Exception {
listenerRegistration?.unregister();
}
-
+
}
public class BindingIndependentDataServiceConnector implements //
RuntimeDataProvider, //
- Provider {
+ Provider, AutoCloseable {
private final Logger LOG = LoggerFactory.getLogger(BindingIndependentDataServiceConnector.class);
@Override
public DataObject readOperationalData(InstanceIdentifier<? extends DataObject> path) {
- org.opendaylight.yangtools.yang.data.api.InstanceIdentifier biPath = mappingService.toDataDom(path);
- CompositeNode result = biDataService.readOperationalData(biPath);
- return mappingService.dataObjectFromDataDom(path, result);
+ try {
+ org.opendaylight.yangtools.yang.data.api.InstanceIdentifier biPath = mappingService.toDataDom(path);
+ CompositeNode result = biDataService.readOperationalData(biPath);
+ return mappingService.dataObjectFromDataDom(path, result);
+ } catch (DeserializationException e) {
+ throw new IllegalStateException(e);
+ }
}
@Override
public DataObject readConfigurationData(InstanceIdentifier<? extends DataObject> path) {
- org.opendaylight.yangtools.yang.data.api.InstanceIdentifier biPath = mappingService.toDataDom(path);
- CompositeNode result = biDataService.readConfigurationData(biPath);
- return mappingService.dataObjectFromDataDom(path, result);
+ try {
+ org.opendaylight.yangtools.yang.data.api.InstanceIdentifier biPath = mappingService.toDataDom(path);
+ CompositeNode result = biDataService.readConfigurationData(biPath);
+ return mappingService.dataObjectFromDataDom(path, result);
+ } catch (DeserializationException e) {
+ throw new IllegalStateException(e);
+ }
}
private DataModificationTransaction createBindingToDomTransaction(
.beginTransaction();
for (Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> entry : source
.getUpdatedConfigurationData().entrySet()) {
- InstanceIdentifier<?> baKey = mappingService.fromDataDom(entry.getKey());
- DataObject baData = mappingService.dataObjectFromDataDom(baKey, entry.getValue());
- target.putConfigurationData(baKey, baData);
+ try {
+ InstanceIdentifier<?> baKey = mappingService.fromDataDom(entry.getKey());
+ DataObject baData = mappingService.dataObjectFromDataDom(baKey, entry.getValue());
+ target.putConfigurationData(baKey, baData);
+ } catch (DeserializationException e) {
+ LOG.error("Ommiting from BA transaction: {}. Reason{}.", entry.getKey(), e);
+ }
}
for (Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> entry : source
.getUpdatedOperationalData().entrySet()) {
- InstanceIdentifier<?> baKey = mappingService.fromDataDom(entry.getKey());
- DataObject baData = mappingService.dataObjectFromDataDom(baKey, entry.getValue());
- target.putOperationalData(baKey, baData);
+ try {
+
+ InstanceIdentifier<?> baKey = mappingService.fromDataDom(entry.getKey());
+ DataObject baData = mappingService.dataObjectFromDataDom(baKey, entry.getValue());
+ target.putOperationalData(baKey, baData);
+ } catch (DeserializationException e) {
+ LOG.error("Ommiting from BA transaction: {}. Reason{}.", entry.getKey(), e);
+ }
}
for (org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry : source.getRemovedConfigurationData()) {
- InstanceIdentifier<?> baEntry = mappingService.fromDataDom(entry);
- target.removeConfigurationData(baEntry);
+ try {
+
+ InstanceIdentifier<?> baEntry = mappingService.fromDataDom(entry);
+ target.removeConfigurationData(baEntry);
+ } catch (DeserializationException e) {
+ LOG.error("Ommiting from BA transaction: {}. Reason{}.", entry, e);
+ }
}
for (org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry : source.getRemovedOperationalData()) {
- InstanceIdentifier<?> baEntry = mappingService.fromDataDom(entry);
- target.removeOperationalData(baEntry);
+ try {
+
+ InstanceIdentifier<?> baEntry = mappingService.fromDataDom(entry);
+ target.removeOperationalData(baEntry);
+ } catch (DeserializationException e) {
+ LOG.error("Ommiting from BA transaction: {}. Reason{}.", entry, e);
+ }
}
return target;
}
start();
}
+ @Override
+ public void close() throws Exception {
+
+ if (baCommitHandlerRegistration != null) {
+ baCommitHandlerRegistration.close();
+ }
+ if (biCommitHandlerRegistration != null) {
+ biCommitHandlerRegistration.close();
+ }
+
+ }
+
private class DomToBindingTransaction implements
DataCommitTransaction<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> {
@Override
public void onRegister(DataCommitHandlerRegistration<InstanceIdentifier<?>, DataObject> registration) {
-
- org.opendaylight.yangtools.yang.data.api.InstanceIdentifier domPath = mappingService.toDataDom(registration.getPath());
+
+ org.opendaylight.yangtools.yang.data.api.InstanceIdentifier domPath = mappingService.toDataDom(registration
+ .getPath());
// FIXME: do registration based on only active commit handlers.
-
+
}
@Override
org.opendaylight.yangtools.yang.data.api.InstanceIdentifier toDataDom(InstanceIdentifier<? extends DataObject> path);
- DataObject dataObjectFromDataDom(InstanceIdentifier<? extends DataObject> path, CompositeNode result);
+ DataObject dataObjectFromDataDom(InstanceIdentifier<? extends DataObject> path, CompositeNode result) throws DeserializationException;
- InstanceIdentifier<?> fromDataDom(org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry);
+ InstanceIdentifier<?> fromDataDom(org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry) throws DeserializationException;
}
--- /dev/null
+package org.opendaylight.controller.sal.binding.impl.connect.dom;
+
+/**
+ * Checked exception signalling that translation between the
+ * binding-independent (DOM {@code CompositeNode}/{@code InstanceIdentifier})
+ * representation and the binding-aware representation failed.
+ *
+ * Thrown by the mapping service deserialization entry points; callers either
+ * rethrow (read paths) or log-and-skip the offending entry (transaction
+ * translation paths).
+ */
+public class DeserializationException extends Exception {
+
+ public DeserializationException() {
+ }
+
+ public DeserializationException(String message) {
+ super(message);
+ }
+
+ public DeserializationException(Throwable cause) {
+ super(cause);
+ }
+
+ public DeserializationException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public DeserializationException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
+ super(message, cause, enableSuppression, writableStackTrace);
+ }
+
+}
import javassist.ClassPool;
+import org.junit.After;
import org.junit.Before;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.opendaylight.controller.sal.binding.impl.DataBrokerImpl;
import org.opendaylight.controller.sal.binding.impl.connect.dom.BindingIndependentMappingService;
import org.opendaylight.controller.sal.binding.dom.serializer.impl.RuntimeGeneratedMappingServiceImpl;
import org.opendaylight.controller.sal.core.api.data.DataBrokerService;
+import org.opendaylight.controller.sal.core.api.data.DataStore;
+import org.opendaylight.controller.sal.dom.broker.impl.DataStoreStatsWrapper;
import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
+import org.opendaylight.controller.sal.dom.broker.impl.SchemaAwareDataStoreAdapter;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
import org.reflections.Reflections;
import org.reflections.scanners.ResourcesScanner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.base.Predicate;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
-public abstract class AbstractDataServiceTest {
+public abstract class AbstractDataServiceTest {
+ private static Logger log = LoggerFactory.getLogger(AbstractDataServiceTest.class);
+
protected org.opendaylight.controller.sal.core.api.data.DataProviderService biDataService;
protected DataProviderService baDataService;
-
+
/**
* Workaround for JUNIT sharing classloaders
*
*/
protected static final ClassPool POOL = new ClassPool();
-
+
protected RuntimeGeneratedMappingServiceImpl mappingServiceImpl;
protected BindingIndependentMappingService mappingService;
protected DataBrokerImpl baDataImpl;
protected org.opendaylight.controller.sal.dom.broker.DataBrokerImpl biDataImpl;
protected ListeningExecutorService executor;
protected BindingIndependentDataServiceConnector connectorServiceImpl;
- protected HashMapDataStore dataStore;
-
-
+ protected HashMapDataStore rawDataStore;
+ private SchemaAwareDataStoreAdapter schemaAwareDataStore;
+ private DataStoreStatsWrapper dataStoreStats;
+
+ protected DataStore dataStore;
+
@Before
public void setUp() {
executor = MoreExecutors.sameThreadExecutor();
baDataImpl = new DataBrokerImpl();
baDataService = baDataImpl;
baDataImpl.setExecutor(executor);
-
+
biDataImpl = new org.opendaylight.controller.sal.dom.broker.DataBrokerImpl();
- biDataService = biDataImpl;
+ biDataService = biDataImpl;
biDataImpl.setExecutor(executor);
-
- dataStore = new HashMapDataStore();
- org.opendaylight.yangtools.yang.data.api.InstanceIdentifier treeRoot = org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.builder().toInstance();
+
+ rawDataStore = new HashMapDataStore();
+ schemaAwareDataStore = new SchemaAwareDataStoreAdapter();
+ schemaAwareDataStore.changeDelegate(rawDataStore);
+ dataStoreStats = new DataStoreStatsWrapper(schemaAwareDataStore);
+ dataStore = dataStoreStats;
+
+ org.opendaylight.yangtools.yang.data.api.InstanceIdentifier treeRoot = org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
+ .builder().toInstance();
biDataImpl.registerConfigurationReader(treeRoot, dataStore);
biDataImpl.registerOperationalReader(treeRoot, dataStore);
biDataImpl.registerCommitHandler(treeRoot, dataStore);
-
+
mappingServiceImpl = new RuntimeGeneratedMappingServiceImpl();
mappingServiceImpl.setPool(POOL);
mappingService = mappingServiceImpl;
File pathname = new File("target/gen-classes-debug");
- //System.out.println("Generated classes are captured in " + pathname.getAbsolutePath());
+ // System.out.println("Generated classes are captured in " +
+ // pathname.getAbsolutePath());
mappingServiceImpl.start(null);
- //mappingServiceImpl.getBinding().setClassFileCapturePath(pathname);
-
+ // mappingServiceImpl.getBinding().setClassFileCapturePath(pathname);
+
connectorServiceImpl = new BindingIndependentDataServiceConnector();
connectorServiceImpl.setBaDataService(baDataService);
connectorServiceImpl.setBiDataService(biDataService);
connectorServiceImpl.setMappingService(mappingServiceImpl);
connectorServiceImpl.start();
-
- String[] yangFiles= getModelFilenames();
- if(yangFiles != null && yangFiles.length > 0) {
- mappingServiceImpl.onGlobalContextUpdated(getContext(yangFiles));
+
+ String[] yangFiles = getModelFilenames();
+ if (yangFiles != null && yangFiles.length > 0) {
+ SchemaContext context = getContext(yangFiles);
+ mappingServiceImpl.onGlobalContextUpdated(context);
+ schemaAwareDataStore.onGlobalContextUpdated(context);
}
}
-
- protected String[] getModelFilenames() {
+ protected String[] getModelFilenames() {
return getAllModelFilenames();
}
-
+
public static String[] getAllModelFilenames() {
Predicate<String> predicate = new Predicate<String>() {
@Override
return input.endsWith(".yang");
}
};
- Reflections reflection= new Reflections("META-INF.yang", new ResourcesScanner());
+ Reflections reflection = new Reflections("META-INF.yang", new ResourcesScanner());
Set<String> result = reflection.getResources(predicate);
return (String[]) result.toArray(new String[result.size()]);
}
-
+
public static SchemaContext getContext(String[] yangFiles) {
ClassLoader loader = AbstractDataServiceTest.class.getClassLoader();
Set<Module> modules = parser.parseYangModelsFromStreams(streams);
return parser.resolveSchemaContext(modules);
}
+
+ @After
+ public void afterTest() {
+
+ log.info("BIDataStore Statistics: Configuration Read Count: {} TotalTime: {} ns AverageTime (ns): {} ns",
+ dataStoreStats.getConfigurationReadCount(), dataStoreStats.getConfigurationReadTotalTime(),
+ dataStoreStats.getConfigurationReadAverageTime());
+
+ log.info("BIDataStore Statistics: Operational Read Count: {} TotalTime: {} ns AverageTime (ns): {} ns",
+ dataStoreStats.getOperationalReadCount(), dataStoreStats.getOperationalReadTotalTime(),
+ dataStoreStats.getOperationalReadAverageTime());
+
+ log.info("BIDataStore Statistics: Request Commit Count: {} TotalTime: {} ns AverageTime (ns): {} ns",
+ dataStoreStats.getRequestCommitCount(), dataStoreStats.getRequestCommitTotalTime(),
+ dataStoreStats.getRequestCommitAverageTime());
+
+ }
}
public interface DataChangeEvent<P,D> extends DataChange<P, D>, Immutable {
+ /**
+     * Returns an original subtree of data, which starts at the path
+ * where listener was registered.
+ *
+ */
+ D getOriginalConfigurationSubtree();
+
/**
     * Returns an original operational subtree of data, which starts at the path
* where listener was registered.
*
*/
+ D getOriginalOperationalSubtree();
+
+
+
+ /**
+     * Returns an updated subtree of data, which starts at the path
+ * where listener was registered.
+ *
+ */
D getUpdatedConfigurationSubtree();
/**
- * Returns a new subtree of data, which starts at the path
+     * Returns an updated subtree of data, which starts at the path
* where listener was registered.
*
*/
import org.opendaylight.controller.md.sal.common.api.RegistrationListener
import org.opendaylight.yangtools.concepts.util.ListenerRegistry
import java.util.concurrent.atomic.AtomicLong
+import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent
abstract class AbstractDataBroker<P extends Path<P>, D, DCL extends DataChangeListener<P, D>> implements DataModificationTransactionFactory<P, D>, //
DataReader<P, D>, //
override final def registerDataChangeListener(P path, DCL listener) {
val reg = new DataChangeListenerRegistration(path, listener, this);
listeners.put(path, reg);
+ val initialConfig = dataReadRouter.readConfigurationData(path);
+ val initialOperational = dataReadRouter.readOperationalData(path);
+ val event = createInitialListenerEvent(path,initialConfig,initialOperational);
+ listener.onDataChanged(event);
return reg;
}
return ret;
}
+ protected def DataChangeEvent<P,D> createInitialListenerEvent(P path,D initialConfig,D initialOperational) {
+ return new InitialDataChangeEventImpl<P, D>(initialConfig,initialOperational);
+
+ }
protected final def removeListener(DataChangeListenerRegistration<P, D, DCL> registration) {
listeners.remove(registration.path, registration);
private final DataChange<P, D> dataChange;
private final D originalConfigurationSubtree;
+
+
private final D originalOperationalSubtree;
private final D updatedOperationalSubtree;
private final D updatedConfigurationSubtree;
this.updatedConfigurationSubtree = updatedConfigurationSubtree;
}
+ @Override
+ public D getOriginalConfigurationSubtree() {
+ return originalConfigurationSubtree;
+ }
+
+ @Override
+ public D getOriginalOperationalSubtree() {
+ return originalOperationalSubtree;
+ }
+
@Override
public D getUpdatedOperationalSubtree() {
return updatedOperationalSubtree;
--- /dev/null
+package org.opendaylight.controller.md.sal.common.impl.service;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
+
+/**
+ * {@link DataChangeEvent} delivered to a {@code DataChangeListener} once,
+ * immediately upon registration (see AbstractDataBroker#registerDataChangeListener).
+ *
+ * It reports no created/updated/removed entries (all map/set accessors return
+ * empty collections); it only exposes snapshots of the configuration and
+ * operational subtrees as they existed at registration time. Both the
+ * "original" and "updated" subtree getters intentionally return the same
+ * snapshot, since nothing has changed yet.
+ */
+public class InitialDataChangeEventImpl<P,D> implements DataChangeEvent<P, D> {
+
+ private final D originalOperationalTree;
+ private final D originalConfigurationTree;
+
+ // NOTE: argument order is (config, operational) — matches the call site in
+ // AbstractDataBroker.createInitialListenerEvent.
+ public InitialDataChangeEventImpl(D configTree, D operTree) {
+ originalConfigurationTree = configTree;
+ originalOperationalTree = operTree;
+ }
+
+ @Override
+ public Map<P, D> getCreatedConfigurationData() {
+ return Collections.emptyMap();
+ }
+
+ @Override
+ public Map<P, D> getCreatedOperationalData() {
+ return Collections.emptyMap();
+ }
+
+ @Override
+ public Map<P, D> getOriginalConfigurationData() {
+ return Collections.emptyMap();
+ }
+ @Override
+ public Map<P, D> getOriginalOperationalData() {
+ return Collections.emptyMap();
+ }
+ @Override
+ public Set<P> getRemovedConfigurationData() {
+ return Collections.emptySet();
+ }
+ @Override
+ public Set<P> getRemovedOperationalData() {
+ return Collections.emptySet();
+ }
+ @Override
+ public Map<P, D> getUpdatedConfigurationData() {
+ return Collections.emptyMap();
+ }
+
+ // Initial snapshot: the "updated" subtree is by definition the same as the
+ // original subtree, since this event precedes any change.
+ @Override
+ public D getUpdatedConfigurationSubtree() {
+ return originalConfigurationTree;
+ }
+ @Override
+ public D getUpdatedOperationalSubtree() {
+ return originalOperationalTree;
+ }
+
+ @Override
+ public D getOriginalConfigurationSubtree() {
+ return originalConfigurationTree;
+ }
+
+ @Override
+ public D getOriginalOperationalSubtree() {
+ return originalOperationalTree;
+ }
+
+ @Override
+ public Map<P, D> getUpdatedOperationalData() {
+ return Collections.emptyMap();
+ }
+
+
+}
--- /dev/null
+package org.opendaylight.controller.md.sal.common.impl.util;
+
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.opendaylight.yangtools.concepts.Delegator;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for delegators whose delegate may be swapped at runtime.
+ *
+ * Reads of the delegate take a shared read lock; {@link #changeDelegate(Object)}
+ * takes the exclusive write lock so a swap cannot interleave with a read.
+ * Note the lock only guards the delegate *reference* — it does not make the
+ * delegate object itself thread-safe.
+ */
+public class AbstractLockableDelegator<T> implements Delegator<T> {
+
+ private final ReentrantReadWriteLock delegateLock = new ReentrantReadWriteLock();
+ private final ReadLock delegateReadLock = delegateLock.readLock();
+ private final WriteLock delegateWriteLock = delegateLock.writeLock();
+
+
+ // Exposed so subclasses can hold the read lock across a multi-step use of
+ // the delegate, not just a single getDelegate() call.
+ protected Lock getDelegateReadLock() {
+ return delegateReadLock;
+ }
+
+ private T delegate;
+
+ public AbstractLockableDelegator() {
+ // NOOP
+ }
+
+ public AbstractLockableDelegator(T initialDelegate) {
+ delegate = initialDelegate;
+ }
+
+ // Returns the current delegate; may be null if none was set.
+ @Override
+ public T getDelegate() {
+ try {
+ delegateReadLock.lock();
+ return delegate;
+ } finally {
+ delegateReadLock.unlock();
+ }
+ }
+
+ // Like getDelegate(), but fails fast (IllegalStateException) when no
+ // delegate has been set yet.
+ public T retrieveDelegate() {
+ try {
+ delegateReadLock.lock();
+ Preconditions.checkState(delegate != null,"Delegate is null");
+ return delegate;
+ } finally {
+ delegateReadLock.unlock();
+ }
+ }
+
+ /**
+ * Atomically replaces the delegate and notifies subclasses via
+ * {@link #onDelegateChanged(Object, Object)} while still holding the
+ * write lock.
+ *
+ * @param newDelegate
+ * @return oldDelegate
+ */
+ public final T changeDelegate(T newDelegate) {
+ try {
+ delegateWriteLock.lock();
+ T oldDelegate = delegate;
+ delegate = newDelegate;
+ onDelegateChanged(oldDelegate, newDelegate);
+ return oldDelegate;
+ } finally {
+ delegateWriteLock.unlock();
+ }
+ }
+
+
+ // Subclass hook, invoked under the write lock; keep overrides short.
+ protected void onDelegateChanged(T oldDelegate, T newDelegate) {
+ // NOOP in abstract class; subclasses may override to react to the swap.
+ }
+}
public interface DataStore extends //
DataReader<InstanceIdentifier, CompositeNode>,
DataCommitHandler<InstanceIdentifier, CompositeNode> {
+
+
+ Iterable<InstanceIdentifier> getStoredConfigurationPaths();
+ Iterable<InstanceIdentifier> getStoredOperationalPaths();
+
+ boolean containsConfigurationPath(InstanceIdentifier path);
+ boolean containsOperationalPath(InstanceIdentifier path);
}
import org.opendaylight.controller.sal.dom.broker.BrokerConfigActivator;
import org.opendaylight.controller.sal.dom.broker.BrokerImpl;
import org.osgi.framework.BundleContext;
+import static com.google.common.base.Preconditions.*;
/**
*
@Override
public void validate(){
super.validate();
- // Add custom validation for module attributes here.
+ checkArgument(getDataStore() != null, "Data Store needs to be provided for DomBroker");
}
+
+
@Override
public java.lang.AutoCloseable createInstance() {
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl
import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
import org.opendaylight.controller.sal.core.api.data.DataStore
+import org.opendaylight.controller.sal.dom.broker.impl.SchemaAwareDataStoreAdapter
class BrokerConfigActivator implements AutoCloseable {
private var SchemaServiceImpl schemaService;
private var DataBrokerImpl dataService;
private var MountPointManagerImpl mountService;
+
+ SchemaAwareDataStoreAdapter wrappedStore
public def void start(BrokerImpl broker,DataStore store,BundleContext context) {
val emptyProperties = new Hashtable<String, String>();
dataReg = context.registerService(DataBrokerService, dataService, emptyProperties);
dataProviderReg = context.registerService(DataProviderService, dataService, emptyProperties);
- dataService.registerConfigurationReader(ROOT, store);
- dataService.registerCommitHandler(ROOT, store);
- dataService.registerOperationalReader(ROOT, store);
+ wrappedStore = new SchemaAwareDataStoreAdapter();
+ wrappedStore.changeDelegate(store);
+ wrappedStore.setValidationEnabled(false);
+
+ dataService.registerConfigurationReader(ROOT, wrappedStore);
+ dataService.registerCommitHandler(ROOT, wrappedStore);
+ dataService.registerOperationalReader(ROOT, wrappedStore);
mountService = new MountPointManagerImpl();
mountService.setDataBroker(dataService);
import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.DataReader;
import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataBroker;
import org.opendaylight.controller.sal.common.DataStoreIdentifier;
import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
import org.opendaylight.yangtools.yang.data.api.CompositeNode
import org.opendaylight.controller.md.sal.common.api.data.DataReader
+import org.opendaylight.yangtools.yang.common.QName
+import java.net.URI
+import java.util.List
+import org.opendaylight.yangtools.yang.data.api.Node
+import java.util.ArrayList
+import org.opendaylight.yangtools.yang.data.impl.SimpleNodeTOImpl
+import java.util.Map
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.PathArgument
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.NodeIdentifier
+import org.opendaylight.yangtools.yang.data.api.SimpleNode
+import java.util.Collections
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.NodeIdentifierWithPredicates
+import java.util.HashMap
+import static com.google.common.base.Preconditions.*;
+import java.util.Collection
+import java.util.Set
+import java.util.Map.Entry
+import org.slf4j.LoggerFactory
+import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl
class DataReaderRouter extends AbstractDataReadRouter<InstanceIdentifier, CompositeNode> {
+ private static val LOG = LoggerFactory.getLogger(DataReaderRouter);
+ private static val NETCONF_NAMESPACE = URI.create("urn:ietf:params:xml:ns:netconf:base:1.0")
+ private static val NETCONF_DATA = new QName(NETCONF_NAMESPACE,"data");
override protected merge(InstanceIdentifier path, Iterable<CompositeNode> data) {
+ // Merges the data returned by multiple readers for the same path into a
+ // single CompositeNode. Key leaves are stripped from each contribution
+ // and re-added exactly once at the end so they are not duplicated.
+ val pathArgument = path.path.last;
+ var empty = true;
+ var name = pathArgument?.nodeType;
+ val nodes = new ArrayList<Node<?>>();
+ val keyNodes = new HashMap<QName, SimpleNode<?>>();
- val iterator = data.iterator;
- if(iterator.hasNext) {
- return data.iterator.next
+ for(dataBit : data) {
+ try {
+ if(pathArgument != null && dataBit != null) {
+ empty = false;
+ val keyNodesLocal = getKeyNodes(pathArgument,dataBit);
+ // FIX: collect the extracted key leaves; previously keyNodes was
+ // never populated, so keys were stripped below but never restored.
+ keyNodes.putAll(keyNodesLocal);
+ nodes.addAll(dataBit.childrenWithout(keyNodesLocal.entrySet));
+ } else if (dataBit != null) {
+ empty = false;
+ nodes.addAll(dataBit.children)
+ }
+ } catch (IllegalStateException e) {
+ LOG.error("BUG: Read data for path {} was invalid",path,e);
+ }
+ }
}
- return null;
+ if(empty) {
+ return null;
+ }
+ /**
+ * Reading from Root
+ *
+ */
+ if(pathArgument == null) {
+ return new CompositeNodeTOImpl(NETCONF_DATA,null,nodes);
+ }
+ val finalNodes = new ArrayList<Node<?>>();
+ finalNodes.addAll(keyNodes.values);
+ finalNodes.addAll(nodes);
+ return new CompositeNodeTOImpl(name,null,finalNodes);
}
-
+
+
+
+ // Fallback dispatch: path arguments other than NodeIdentifierWithPredicates
+ // carry no key predicates, so there are no key leaves to extract.
+ dispatch def Map<QName, SimpleNode<?>> getKeyNodes(PathArgument argument, CompositeNode node) {
+ return Collections.emptyMap();
+ }
+
+ // Extracts the key leaf nodes named by the identifier's predicates and
+ // validates each against the node's actual children: at most one simple
+ // node per key, its value must match the predicate, and a key must never
+ // appear as a composite node.
+ dispatch def getKeyNodes(NodeIdentifierWithPredicates argument, CompositeNode node) {
+ val ret = new HashMap<QName, SimpleNode<?>>();
+ for (keyValue : argument.keyValues.entrySet) {
+ val simpleNode = node.getSimpleNodesByName(keyValue.key);
+ if(simpleNode !== null && !simpleNode.empty) {
+ // FIX: Guava checkState message templates use %s placeholders, not
+ // $s — the key and node were never rendered into the error text.
+ checkState(simpleNode.size <= 1,"Only one simple node for key %s is allowed in node %s",keyValue.key,node);
+ checkState(simpleNode.get(0).value == keyValue.value,"Key node must equals to instance identifier value");
+ ret.put(keyValue.key,simpleNode.get(0));
+ }
+ val compositeNode = node.getCompositesByName(keyValue.key);
+ checkState(compositeNode === null || compositeNode.empty,"Key node must be Simple Node, not composite node.");
+ }
+ return ret;
+ }
+
+ // Returns the node's children minus the given key entries.
+ // FIX: the previous code called entries.contains(scannedNode.nodeType),
+ // asking a Set<Entry<QName, SimpleNode<?>>> whether it contains a QName —
+ // that can never match, so key nodes were never actually filtered out.
+ def Collection<? extends Node<?>> childrenWithout(CompositeNode node, Set<Entry<QName, SimpleNode<?>>> entries) {
+ if(entries.empty) {
+ return node.children;
+ }
+ val filteredNodes = new ArrayList<Node<?>>();
+ for(scannedNode : node.children) {
+ if(!entries.exists[key == scannedNode.nodeType]) {
+ filteredNodes.add(scannedNode);
+ }
+ }
+ return filteredNodes;
+ }
+
}
--- /dev/null
+package org.opendaylight.controller.sal.dom.broker.impl;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
+import org.opendaylight.controller.sal.core.api.data.DataStore;
+import org.opendaylight.yangtools.concepts.Delegator;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+
+public class DataStoreStatsWrapper implements Delegator<DataStore>, DataStore {
+
+ private final DataStore delegate;
+
+ private AtomicLong cfgReadCount = new AtomicLong();
+ private AtomicLong cfgReadTimeTotal = new AtomicLong();
+
+ private AtomicLong operReadCount = new AtomicLong();
+ private AtomicLong operReadTimeTotal = new AtomicLong();
+
+ private AtomicLong requestCommitCount = new AtomicLong();
+ private AtomicLong requestCommitTimeTotal = new AtomicLong();
+
+ public DataStoreStatsWrapper(DataStore store) {
+ delegate = store;
+ }
+
+ @Override
+ public DataStore getDelegate() {
+ return delegate;
+ }
+
+ @Override
+ public CompositeNode readConfigurationData(InstanceIdentifier path) {
+ cfgReadCount.incrementAndGet();
+ final long startTime = System.nanoTime();
+ try {
+ return delegate.readConfigurationData(path);
+ } finally {
+ final long endTime = System.nanoTime();
+ final long runTime = endTime - startTime;
+ cfgReadTimeTotal.addAndGet(runTime);
+ }
+ }
+
+ @Override
+ public CompositeNode readOperationalData(InstanceIdentifier path) {
+ operReadCount.incrementAndGet();
+ final long startTime = System.nanoTime();
+ try {
+ return delegate.readOperationalData(path);
+ } finally {
+ final long endTime = System.nanoTime();
+ final long runTime = endTime - startTime;
+ cfgReadTimeTotal.addAndGet(runTime);
+ }
+ }
+
+ public DataCommitTransaction<InstanceIdentifier, CompositeNode> requestCommit(
+ DataModification<InstanceIdentifier, CompositeNode> modification) {
+ requestCommitCount.incrementAndGet();
+ final long startTime = System.nanoTime();
+ try {
+ return delegate.requestCommit(modification);
+ } finally {
+ final long endTime = System.nanoTime();
+ final long runTime = endTime - startTime;
+ requestCommitTimeTotal.addAndGet(runTime);
+ }
+ };
+
+ @Override
+ public boolean containsConfigurationPath(InstanceIdentifier path) {
+ return delegate.containsConfigurationPath(path);
+ }
+
+ public Iterable<InstanceIdentifier> getStoredConfigurationPaths() {
+ return delegate.getStoredConfigurationPaths();
+ }
+
+ public Iterable<InstanceIdentifier> getStoredOperationalPaths() {
+ return delegate.getStoredOperationalPaths();
+ }
+
+ public boolean containsOperationalPath(InstanceIdentifier path) {
+ return delegate.containsOperationalPath(path);
+ }
+
+ public final long getConfigurationReadCount() {
+ return cfgReadCount.get();
+ }
+
+ public final long getOperationalReadCount() {
+ return operReadCount.get();
+ }
+
+ public final long getRequestCommitCount() {
+ return requestCommitCount.get();
+ }
+
+ public final long getConfigurationReadTotalTime() {
+ return cfgReadTimeTotal.get();
+ }
+
+ public final long getOperationalReadTotalTime() {
+ return operReadTimeTotal.get();
+ }
+
+ public final long getRequestCommitTotalTime() {
+ return requestCommitTimeTotal.get();
+ }
+
+ public final long getConfigurationReadAverageTime() {
+ long readCount = cfgReadCount.get();
+ if(readCount == 0) {
+ return 0;
+ }
+ return cfgReadTimeTotal.get() / readCount;
+ }
+
+ public final long getOperationalReadAverageTime() {
+ long readCount = operReadCount.get();
+ if(readCount == 0) {
+ return 0;
+ }
+ return operReadTimeTotal.get() / readCount;
+ }
+
+ public final long getRequestCommitAverageTime() {
+ long count = requestCommitCount.get();
+ if(count == 0) {
+ return 0;
+ }
+ return requestCommitTimeTotal.get() / count;
+ }
+
+}
+++ /dev/null
-package org.opendaylight.controller.sal.dom.broker.impl
-
-import org.opendaylight.yangtools.yang.data.api.CompositeNode
-import java.util.Map
-import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
-import java.util.Map.Entry
-import java.util.HashSet
-import java.util.ArrayList
-import org.opendaylight.yangtools.yang.data.api.Node
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl
-
-class DataUtils {
-
- static def CompositeNode read(Map<InstanceIdentifier, CompositeNode> map, InstanceIdentifier path) {
- val root = map.get(path);
- val childs = map.getChilds(path);
- if(root === null && childs.empty) {
- return null;
- }
-
- return merge(path, root, childs);
- }
-
- static def CompositeNode merge(InstanceIdentifier path, CompositeNode node,
- HashSet<Entry<InstanceIdentifier, CompositeNode>> entries) {
- val it = new ArrayList<Node<?>>();
- val qname = path.path.last.nodeType;
- if (node != null) {
- addAll(node.children);
- }
- for (entry : entries) {
- val nesting = entry.key.path.size - path.path.size;
- if (nesting === 1) {
- add(entry.value);
- }
- }
- return new CompositeNodeTOImpl(qname, null, it);
- }
-
- static def getChilds(Map<InstanceIdentifier, CompositeNode> map, InstanceIdentifier path) {
- val it = new HashSet<Entry<InstanceIdentifier, CompositeNode>>();
- for (entry : map.entrySet) {
- if (path.contains(entry.key)) {
- add(entry);
- }
- }
- return it;
- }
-
-}
import java.util.Collections
import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
import org.opendaylight.yangtools.yang.data.api.CompositeNode
-import static extension org.opendaylight.controller.sal.dom.broker.impl.DataUtils.*;
import org.opendaylight.controller.sal.core.api.data.DataStore
import java.util.HashSet
class HashMapDataStore implements DataStore, AutoCloseable {
+
val Map<InstanceIdentifier, CompositeNode> configuration = new ConcurrentHashMap();
val Map<InstanceIdentifier, CompositeNode> operational = new ConcurrentHashMap();
+
+
+
+ override containsConfigurationPath(InstanceIdentifier path) {
+ // Implemented: the generated stub threw UnsupportedOperationException,
+ // which broke any caller probing whether configuration data exists.
+ configuration.containsKey(path)
+ }
+
+ override containsOperationalPath(InstanceIdentifier path) {
+ // Implemented: stub previously threw UnsupportedOperationException.
+ operational.containsKey(path)
+ }
+
+ // Live view of every path that currently has configuration data.
+ override getStoredConfigurationPaths() {
+ configuration.keySet
+ }
+
+ // Live view of every path that currently has operational data.
+ override getStoredOperationalPaths() {
+ operational.keySet
+ }
override readConfigurationData(InstanceIdentifier path) {
+ // Exact-match lookup only; merging of child paths into a parent node is
+ // now done by the SchemaAwareDataStoreAdapter wrapper, not in the store.
- configuration.read(path);
+ configuration.get(path);
}
override readOperationalData(InstanceIdentifier path) {
- operational.read(path);
+ operational.get(path);
}
-
--- /dev/null
+package org.opendaylight.controller.sal.dom.broker.impl
+
+import org.opendaylight.yangtools.yang.model.api.SchemaContext
+import org.opendaylight.yangtools.yang.data.api.CompositeNode
+
+// Placeholder for future schema-driven merge logic; currently holds only the
+// SchemaContext dependency and implements nothing.
+class SchemaAwareDataMerger {
+
+ private SchemaContext schema;
+
+
+
+
+}
\ No newline at end of file
--- /dev/null
+package org.opendaylight.controller.sal.dom.broker.impl;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Map.Entry;
+
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
+import org.opendaylight.controller.md.sal.common.api.data.DataReader;
+import org.opendaylight.controller.md.sal.common.impl.util.AbstractLockableDelegator;
+import org.opendaylight.controller.sal.core.api.data.DataStore;
+import org.opendaylight.controller.sal.core.api.model.SchemaServiceListener;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.FluentIterable;
+
+/**
+ * DataStore decorator that layers schema awareness over a plain store:
+ * reads merge a node's directly-nested children (stored under separate
+ * instance identifiers) back into the parent node, and commit requests can
+ * be validated against the current YANG schema (validation is currently a
+ * stub that only logs when no schema is available).
+ */
+public class SchemaAwareDataStoreAdapter extends AbstractLockableDelegator<DataStore> implements //
+ DataStore, //
+ SchemaServiceListener, //
+ AutoCloseable {
+
+ private final static Logger LOG = LoggerFactory.getLogger(SchemaAwareDataStoreAdapter.class);
+
+ // Latest global schema; null until onGlobalContextUpdated fires.
+ private SchemaContext schema = null;
+ private boolean validationEnabled = false;
+ // NOTE(review): currently unused; placeholder for schema-driven merging.
+ private SchemaAwareDataMerger dataMerger = null;
+ // All reads are routed through the child-merging reader below.
+ private DataReader<InstanceIdentifier, CompositeNode> reader = new MergeFirstLevelReader();
+
+ @Override
+ public boolean containsConfigurationPath(InstanceIdentifier path) {
+ try {
+ getDelegateReadLock().lock();
+ return getDelegate().containsConfigurationPath(path);
+
+ } finally {
+ getDelegateReadLock().unlock();
+ }
+ }
+
+ @Override
+ public boolean containsOperationalPath(InstanceIdentifier path) {
+ try {
+ getDelegateReadLock().lock();
+ return getDelegate().containsOperationalPath(path);
+
+ } finally {
+ getDelegateReadLock().unlock();
+ }
+ }
+
+ @Override
+ public Iterable<InstanceIdentifier> getStoredConfigurationPaths() {
+ try {
+ getDelegateReadLock().lock();
+ return getDelegate().getStoredConfigurationPaths();
+
+ } finally {
+ getDelegateReadLock().unlock();
+ }
+ }
+
+ @Override
+ public Iterable<InstanceIdentifier> getStoredOperationalPaths() {
+ try {
+ getDelegateReadLock().lock();
+ return getDelegate().getStoredOperationalPaths();
+
+ } finally {
+ getDelegateReadLock().unlock();
+ }
+ }
+
+ @Override
+ public CompositeNode readConfigurationData(InstanceIdentifier path) {
+ return reader.readConfigurationData(path);
+ }
+
+ @Override
+ public CompositeNode readOperationalData(InstanceIdentifier path) {
+ return reader.readOperationalData(path);
+ }
+
+ @Override
+ public org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction<InstanceIdentifier, CompositeNode> requestCommit(
+ DataModification<InstanceIdentifier, CompositeNode> modification) {
+ validateAgainstSchema(modification);
+ DataModification<InstanceIdentifier, CompositeNode> cleanedUp = prepareMergedTransaction(modification);
+ return retrieveDelegate().requestCommit(cleanedUp);
+ }
+
+ public boolean isValidationEnabled() {
+ return validationEnabled;
+ }
+
+ public void setValidationEnabled(boolean validationEnabled) {
+ this.validationEnabled = validationEnabled;
+ }
+
+ // Validation stub: only checks that a schema is present; actual model
+ // validation is not implemented yet.
+ private void validateAgainstSchema(DataModification<InstanceIdentifier, CompositeNode> modification) {
+ if (!validationEnabled) {
+ return;
+ }
+
+ if (schema == null) {
+ LOG.info("Validation not performed for {}. Reason: YANG Schema not present.", modification.getIdentifier());
+ return;
+ }
+ }
+
+ @Override
+ protected void onDelegateChanged(DataStore oldDelegate, DataStore newDelegate) {
+ // NOOP
+ }
+
+ @Override
+ public void onGlobalContextUpdated(SchemaContext context) {
+ this.schema = context;
+ }
+
+ @Override
+ public void close() throws Exception {
+ this.schema = null;
+ }
+
+ private DataModification<InstanceIdentifier, CompositeNode> prepareMergedTransaction(
+ DataModification<InstanceIdentifier, CompositeNode> original) {
+ // NOOP for now
+ return original;
+ }
+
+ // NOTE(review): currently unreferenced; orders entries by path depth,
+ // presumably for the future prepareMergedTransaction implementation.
+ private final Comparator<Entry<InstanceIdentifier, CompositeNode>> preparationComparator = new Comparator<Entry<InstanceIdentifier, CompositeNode>>() {
+ @Override
+ public int compare(Entry<InstanceIdentifier, CompositeNode> o1, Entry<InstanceIdentifier, CompositeNode> o2) {
+ InstanceIdentifier o1Key = o1.getKey();
+ InstanceIdentifier o2Key = o2.getKey();
+ return Integer.compare(o1Key.getPath().size(), o2Key.getPath().size());
+ }
+ };
+
+ /**
+ * Reader that reassembles a node from the delegate store: the node's own
+ * data plus every stored path exactly one level below it, merged into a
+ * single CompositeNode. Returns null for the root path or when neither
+ * the node nor any direct child exists.
+ */
+ private class MergeFirstLevelReader implements DataReader<InstanceIdentifier, CompositeNode> {
+
+ @Override
+ public CompositeNode readConfigurationData(final InstanceIdentifier path) {
+ getDelegateReadLock().lock();
+ try {
+ if (path.getPath().isEmpty()) {
+ return null;
+ }
+ QName qname = null;
+ CompositeNode original = getDelegate().readConfigurationData(path);
+ ArrayList<Node<?>> childNodes = new ArrayList<Node<?>>();
+ if (original != null) {
+ childNodes.addAll(original.getChildren());
+ qname = original.getNodeType();
+ } else {
+ // Node itself absent: derive its QName from the path tail.
+ qname = path.getPath().get(path.getPath().size() - 1).getNodeType();
+ }
+
+ // Stored paths nested exactly one level under the requested path.
+ FluentIterable<InstanceIdentifier> directChildren = FluentIterable.from(getStoredConfigurationPaths())
+ .filter(new Predicate<InstanceIdentifier>() {
+ @Override
+ public boolean apply(InstanceIdentifier input) {
+ if (path.contains(input)) {
+ int nesting = input.getPath().size() - path.getPath().size();
+ if (nesting == 1) {
+ return true;
+ }
+ }
+ return false;
+ }
+ });
+ for (InstanceIdentifier instanceIdentifier : directChildren) {
+ childNodes.add(getDelegate().readConfigurationData(instanceIdentifier));
+ }
+ if (original == null && childNodes.isEmpty()) {
+ return null;
+ }
+
+ return new CompositeNodeTOImpl(qname, null, childNodes);
+ } finally {
+ getDelegateReadLock().unlock();
+ }
+ }
+
+ // Mirrors readConfigurationData against the operational store.
+ @Override
+ public CompositeNode readOperationalData(final InstanceIdentifier path) {
+ getDelegateReadLock().lock();
+ try {
+ if (path.getPath().isEmpty()) {
+ return null;
+ }
+ QName qname = null;
+ CompositeNode original = getDelegate().readOperationalData(path);
+ ArrayList<Node<?>> childNodes = new ArrayList<Node<?>>();
+ if (original != null) {
+ childNodes.addAll(original.getChildren());
+ qname = original.getNodeType();
+ } else {
+ qname = path.getPath().get(path.getPath().size() - 1).getNodeType();
+ }
+
+ FluentIterable<InstanceIdentifier> directChildren = FluentIterable.from(getStoredOperationalPaths())
+ .filter(new Predicate<InstanceIdentifier>() {
+ @Override
+ public boolean apply(InstanceIdentifier input) {
+ if (path.contains(input)) {
+ int nesting = input.getPath().size() - path.getPath().size();
+ if (nesting == 1) {
+ return true;
+ }
+ }
+ return false;
+ }
+ });
+
+ for (InstanceIdentifier instanceIdentifier : directChildren) {
+ childNodes.add(getDelegate().readOperationalData(instanceIdentifier));
+ }
+ if (original == null && childNodes.isEmpty()) {
+ return null;
+ }
+
+ return new CompositeNodeTOImpl(qname, null, childNodes);
+ } finally {
+ getDelegateReadLock().unlock();
+ }
+ }
+ }
+}
<groupId>org.eclipse.xtend</groupId>
<artifactId>org.eclipse.xtend.lib</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-threadgroup-config</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-client</artifactId>
<groupId>${project.groupId}</groupId>
<artifactId>config-api</artifactId>
<version>${netconf.version}</version>
- <scope>test</scope>
+ <scope>provided</scope>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<version>${netconf.version}</version>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>netconf-api</artifactId>
- <version>${netconf.version}</version>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.bgpcep</groupId>
<artifactId>util</artifactId>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-client</artifactId>
- <scope>test</scope>
<version>${netconf.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ <version>2010.09.24.2-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-api</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-config-api</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
</dependencies>
<packaging>bundle</packaging>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <configuration>
- <instructions>
- <Bundle-Activator>org.opendaylight.controller.sal.connect.netconf.NetconfProvider</Bundle-Activator>
- </instructions>
- </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>0.5.9-SNAPSHOT</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>
+ org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ </codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>
+ urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang
+ </namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.8</version>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/config</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.eclipse.xtend</groupId>
+ <artifactId>xtend-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-netconf-connector yang module local name: sal-netconf-connector
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Mon Nov 18 09:44:16 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.connector.netconf;
+
+import io.netty.channel.EventLoopGroup;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+
+import javax.net.ssl.SSLContext;
+
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
+import org.opendaylight.controller.sal.connect.netconf.NetconfDevice;
+import org.osgi.framework.BundleContext;
+
+import static com.google.common.base.Preconditions.*;
+
+import com.google.common.base.Optional;
+import com.google.common.net.InetAddresses;
+
+/**
+*
+*/
+/**
+ * Config-subsystem module that instantiates a NetconfDevice for the
+ * configured address/port, wires it to a netconf client dispatcher and
+ * registers it as a provider with the DOM broker.
+ */
+public final class NetconfConnectorModule extends org.opendaylight.controller.config.yang.md.sal.connector.netconf.AbstractNetconfConnectorModule
+{
+
+ private BundleContext bundleContext;
+
+ public NetconfConnectorModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetconfConnectorModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, NetconfConnectorModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ // Address is currently a plain String in the model; re-enable the
+ // Ipv4/Ipv6 check below once it switches to the IpAddress type.
+ checkState(getAddress() != null,"Address must be set.");
+ //checkState(getAddress().getIpv4Address() != null || getAddress().getIpv6Address() != null,"Address must be set.");
+ checkState(getPort() != null,"Port must be set.");
+ checkState(getDomRegistry() != null,"Dom Registry must be provided.");
+ }
+
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+
+ NetconfDevice device = new NetconfDevice(getIdentifier().getInstanceName());
+ String addressValue = getAddress();
+
+
+ /*
+ * Uncomment after Switch to IP Address
+ if(getAddress().getIpv4Address() != null) {
+ addressValue = getAddress().getIpv4Address().getValue();
+ } else {
+ addressValue = getAddress().getIpv6Address().getValue();
+ }
+
+ */
+ InetAddress addr = InetAddresses.forString(addressValue);
+ InetSocketAddress socketAddress = new InetSocketAddress(addr , getPort().intValue());
+ device.setSocketAddress(socketAddress);
+
+ EventLoopGroup bossGroup = getBossThreadGroupDependency();
+ EventLoopGroup workerGroup = getWorkerThreadGroupDependency();
+ // No TLS context configured yet: the dispatcher runs plain TCP.
+ Optional<SSLContext> maybeContext = Optional.absent();
+ NetconfClientDispatcher dispatcher = new NetconfClientDispatcher(maybeContext , bossGroup, workerGroup);
+
+ // FIX: dropped the earlier bare getDomRegistryDependency() statement
+ // whose result was discarded; the dependency is resolved here instead.
+ getDomRegistryDependency().registerProvider(device, bundleContext);
+
+ device.start(dispatcher);
+ return device;
+ }
+
+ public void setBundleContext(BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
+ }
+}
--- /dev/null
+/**
+ * Generated file
+
+ * Generated from: yang module name: opendaylight-sal-netconf-connector yang module local name: sal-netconf-connector
+ * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ * Generated at: Mon Nov 18 09:44:16 CET 2013
+ *
+ * Do not modify this file unless it is present under src/main directory
+ */
+package org.opendaylight.controller.config.yang.md.sal.connector.netconf;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+/**
+ * Factory that lets the generated base class build the module and then
+ * injects the OSGi bundle context into every NetconfConnectorModule it
+ * produces.
+ */
+public class NetconfConnectorModuleFactory extends
+ org.opendaylight.controller.config.yang.md.sal.connector.netconf.AbstractNetconfConnectorModuleFactory {
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver,
+ DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
+ final NetconfConnectorModule created =
+ (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver, old, bundleContext);
+ created.setBundleContext(bundleContext);
+ return created;
+ }
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ final NetconfConnectorModule created =
+ (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver, bundleContext);
+ created.setBundleContext(bundleContext);
+ return created;
+ }
+}
import java.util.Collections
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher
import org.opendaylight.yangtools.concepts.Registration
+import org.opendaylight.controller.sal.core.api.Provider
+import org.opendaylight.controller.sal.core.api.Broker.ProviderSession
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionService
+import static org.opendaylight.controller.sal.connect.netconf.InventoryUtils.*;
+import org.opendaylight.controller.sal.core.api.data.DataBrokerService
+import org.opendaylight.controller.sal.core.api.data.DataModificationTransaction
+import org.opendaylight.yangtools.yang.data.impl.SimpleNodeTOImpl
+import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl
-class NetconfDevice implements DataReader<InstanceIdentifier, CompositeNode>, RpcImplementation {
+class NetconfDevice implements Provider, DataReader<InstanceIdentifier, CompositeNode>, RpcImplementation, AutoCloseable {
var NetconfClient client;
var InetSocketAddress socketAddress;
@Property
- val MountProvisionInstance mountInstance;
+ var MountProvisionInstance mountInstance;
@Property
- val InstanceIdentifier path;
-
- Registration<DataReader<InstanceIdentifier,CompositeNode>> operReaderReg
-
- Registration<DataReader<InstanceIdentifier,CompositeNode>> confReaderReg
-
- public new(MountProvisionInstance mount,InstanceIdentifier path) {
- _mountInstance = mount;
- _path = path;
+ var InstanceIdentifier path;
+
+ Registration<DataReader<InstanceIdentifier, CompositeNode>> operReaderReg
+
+ Registration<DataReader<InstanceIdentifier, CompositeNode>> confReaderReg
+
+ String name
+
+ MountProvisionService mountService
+
+ public new(String name) {
+ this.name = name;
+ this.path = InstanceIdentifier.builder(INVENTORY_PATH).nodeWithKey(INVENTORY_NODE,
+ Collections.singletonMap(INVENTORY_ID, name)).toInstance;
}
def start(NetconfClientDispatcher dispatcher) {
- client = new NetconfClient("sal-netconf-connector", socketAddress, dispatcher);
-
- confReaderReg = mountInstance.registerConfigurationReader(path,this);
- operReaderReg = mountInstance.registerOperationalReader(path,this);
+ client = new NetconfClient(name, socketAddress, dispatcher);
+ confReaderReg = mountInstance.registerConfigurationReader(path, this);
+ operReaderReg = mountInstance.registerOperationalReader(path, this);
}
override readConfigurationData(InstanceIdentifier path) {
return result.toRpcResult();
}
+ // This provider advertises no broker functionality of its own.
+ override getProviderFunctionality() {
+ Collections.emptySet
+ }
+
+ // On broker session start: ensure this device has an inventory node in
+ // both the configuration and operational stores, then create (or fetch)
+ // its mount point so the readers registered in start() can attach to it.
+ override onSessionInitiated(ProviderSession session) {
+ val dataBroker = session.getService(DataBrokerService);
+
+
+
+ val transaction = dataBroker.beginTransaction
+ if(transaction.operationalNodeNotExisting) {
+ transaction.putOperationalData(path,nodeWithId)
+ }
+ if(transaction.configurationNodeNotExisting) {
+ transaction.putConfigurationData(path,nodeWithId)
+ }
+ // Blocks until the inventory write commits.
+ transaction.commit().get();
+ mountService = session.getService(MountProvisionService);
+ mountInstance = mountService.createOrGetMountPoint(path);
+ }
+
+ // Minimal inventory node: a single INVENTORY_ID leaf carrying this
+ // device's configured name.
+ def getNodeWithId() {
+ val id = new SimpleNodeTOImpl(INVENTORY_ID,null,name);
+ return new CompositeNodeTOImpl(INVENTORY_NODE,null,Collections.singletonList(id));
+ }
+
+ // True when no configuration data exists yet at this device's path.
+ def boolean configurationNodeNotExisting(DataModificationTransaction transaction) {
+ return null === transaction.readConfigurationData(path);
+ }
+
+ // True when no operational data exists yet at this device's path.
+ def boolean operationalNodeNotExisting(DataModificationTransaction transaction) {
+ return null === transaction.readOperationalData(path);
+ }
+
def Node<?> findNode(CompositeNode node, InstanceIdentifier identifier) {
var Node<?> current = node;
}
return current;
}
-
- public def stop() {
+
+ override close() {
confReaderReg?.close()
operReaderReg?.close()
+ client?.close()
}
}
+++ /dev/null
-package org.opendaylight.controller.sal.connect.netconf
-
-import org.opendaylight.controller.sal.core.api.Broker.ProviderSession
-import org.opendaylight.controller.sal.core.api.mount.MountProvisionService
-import org.opendaylight.controller.md.sal.common.api.data.DataProvisionService
-import org.opendaylight.controller.sal.core.api.data.DataProviderService
-import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
-import org.opendaylight.yangtools.yang.common.QName
-import static org.opendaylight.controller.sal.connect.netconf.InventoryUtils.*;
-import static extension org.opendaylight.controller.sal.connect.netconf.NetconfInventoryUtils.*;
-
-import org.opendaylight.controller.sal.core.api.data.DataChangeListener
-import org.opendaylight.yangtools.yang.data.api.CompositeNode
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent
-import java.util.Map
-import java.util.concurrent.ConcurrentHashMap
-import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcher
-import java.io.OptionalDataException
-import com.google.common.base.Optional
-import java.net.SocketAddress
-import java.net.InetSocketAddress
-
-class NetconfDeviceManager {
-
- val Map<InstanceIdentifier, NetconfDevice> devices = new ConcurrentHashMap;
-
- var ProviderSession session;
-
- @Property
- var DataProviderService dataService;
-
- @Property
- var MountProvisionService mountService;
-
- val nodeUpdateListener = new NetconfInventoryListener(this);
-
-
- @Property
- var NetconfClientDispatcher dispatcher;
-
- def void start() {
- dataService?.registerDataChangeListener(INVENTORY_PATH, nodeUpdateListener);
- if(dispatcher == null) {
- dispatcher = new NetconfClientDispatcher(Optional.absent);
- }
- }
-
- def netconfNodeAdded(InstanceIdentifier path, CompositeNode node) {
- val address = node.endpointAddress;
- val port = Integer.parseInt(node.endpointPort);
- netconfNodeAdded(path,new InetSocketAddress(address,port))
-
- }
-
- def netconfNodeAdded(InstanceIdentifier path, InetSocketAddress address) {
-
- val mountPointPath = path;
- val mountPoint = mountService.createOrGetMountPoint(mountPointPath);
- val localPath = InstanceIdentifier.builder().toInstance;
- val netconfDevice = new NetconfDevice(mountPoint,localPath);
- netconfDevice.setSocketAddress(address);
- netconfDevice.start(dispatcher);
- }
-
- def netconfNodeRemoved(InstanceIdentifier path) {
-
- }
-
-}
-
-class NetconfInventoryListener implements DataChangeListener {
-
- val NetconfDeviceManager manager;
-
- new(NetconfDeviceManager manager) {
- this.manager = manager;
- }
-
- override onDataChanged(DataChangeEvent<InstanceIdentifier, CompositeNode> change) {
-
- //manager.netconfNodeAdded(path, change);
- }
-}
+++ /dev/null
-package org.opendaylight.controller.sal.connect.netconf;
-
-import java.util.Hashtable;
-
-import org.opendaylight.controller.sal.core.api.AbstractProvider;
-import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
-import org.opendaylight.controller.sal.core.api.data.DataProviderService;
-import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
-import org.osgi.framework.BundleContext;
-
-public class NetconfProvider extends AbstractProvider {
-
- private NetconfDeviceManager netconfDeviceManager;
-
- @Override
- protected void startImpl(BundleContext context) {
- netconfDeviceManager = new NetconfDeviceManager();
- context.registerService(NetconfDeviceManager.class, netconfDeviceManager, new Hashtable<String,String>());
- }
-
-
- @Override
- public void onSessionInitiated(ProviderSession session) {
- MountProvisionService mountService = session.getService(MountProvisionService.class);
-
-
- netconfDeviceManager.setMountService(mountService);
- netconfDeviceManager.start();
- }
-
- @Override
- protected void stopImpl(BundleContext context) {
-
- }
-}
--- /dev/null
+module odl-sal-netconf-connector-cfg {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:connector:netconf";
+ prefix "sal-netconf";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import threadpool {prefix th;}
+ import netty {prefix netty;}
+ import ietf-inet-types {prefix inet;}
+ import opendaylight-md-sal-dom {prefix dom;}
+
+ description
+    "Configuration definition for the MD-SAL NETCONF connector.";
+
+ revision "2013-10-28" {
+ description
+ "Initial revision";
+ }
+
+ identity sal-netconf-connector {
+ base config:module-type;
+ config:java-name-prefix NetconfConnector;
+ }
+
+
+ grouping server {
+ leaf address {
+ type string;
+ }
+
+ leaf port {
+ type uint32;
+ }
+ }
+
+
+ augment "/config:modules/config:module/config:configuration" {
+ case sal-netconf-connector {
+ when "/config:modules/config:module/config:type = 'sal-netconf-connector'";
+
+ leaf address {
+ type string;
+ }
+
+ leaf port {
+ type uint32;
+ }
+
+ container dom-registry {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity dom:dom-broker-osgi-registry;
+ }
+ }
+ }
+
+ container boss-thread-group {
+ uses config:service-ref {
+ refine type {
+ config:required-identity netty:netty-threadgroup;
+ }
+ }
+ }
+
+ container worker-thread-group {
+ uses config:service-ref {
+ refine type {
+ config:required-identity netty:netty-threadgroup;
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.rest.api;
+
+public class Draft01 {
+ public static class MediaTypes {
+ public static final String API = "application/vnd.yang.api";
+ public static final String DATASTORE = "application/vnd.yang.datastore";
+ public static final String DATA = "application/vnd.yang.data";
+ public static final String EVENT = "application/vnd.yang.event";
+ public static final String OPERATION = "application/vnd.yang.operation";
+ public static final String PATCH = "application/vnd.yang.patch";
+ }
+}
*/
package org.opendaylight.controller.sal.rest.api;
-import static org.opendaylight.controller.sal.restconf.impl.MediaTypes.API;
-
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.opendaylight.controller.sal.restconf.impl.StructuredData;
@GET
@Path("/modules")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft01.MediaTypes.API+JSON,Draft01.MediaTypes.API+XML,
+ Draft02.MediaTypes.API+JSON,Draft02.MediaTypes.API+XML})
public StructuredData getModules();
@POST
@Path("/operations/{identifier}")
- @Produces({Draft02.MediaTypes.API+JSON,Draft02.MediaTypes.API+XML,API+JSON,API+XML})
+ @Produces({Draft01.MediaTypes.DATA+JSON,Draft01.MediaTypes.DATA+XML,
+ Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public StructuredData invokeRpc(@PathParam("identifier") String identifier, CompositeNode payload);
@GET
@Path("/config/{identifier:.+}")
- @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML})
+ @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public StructuredData readConfigurationData(@PathParam("identifier") String identifier);
- @PUT
+ @POST
@Path("/config/{identifier:.+}")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public Response createConfigurationData(@PathParam("identifier") String identifier, CompositeNode payload);
- @POST
+ @PUT
@Path("/config/{identifier:.+}")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public Response updateConfigurationData(@PathParam("identifier") String identifier, CompositeNode payload);
@GET
@Path("/operational/{identifier:.+}")
- @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML})
+ @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public StructuredData readOperationalData(@PathParam("identifier") String identifier);
- @PUT
+ @POST
@Path("/operational/{identifier:.+}")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public Response createOperationalData(@PathParam("identifier") String identifier, CompositeNode payload);
- @POST
+ @PUT
@Path("/operational/{identifier:.+}")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public Response updateOperationalData(@PathParam("identifier") String identifier, CompositeNode payload);
}
package org.opendaylight.controller.sal.rest.api;
-import static org.opendaylight.controller.sal.restconf.impl.MediaTypes.API;
-
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.opendaylight.controller.sal.restconf.impl.StructuredData;
@Deprecated
@GET
@Path("/datastore")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft01.MediaTypes.DATASTORE+JSON,Draft01.MediaTypes.DATASTORE+XML})
public StructuredData readAllData();
@Deprecated
@GET
@Path("/datastore/{identifier:.+}")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft01.MediaTypes.DATA+JSON,Draft01.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public StructuredData readData(@PathParam("identifier") String identifier);
@Deprecated
- @PUT
+ @POST
@Path("/datastore/{identifier:.+}")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft01.MediaTypes.DATA+JSON,Draft01.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public Response createConfigurationDataLegacy(@PathParam("identifier") String identifier, CompositeNode payload);
@Deprecated
- @POST
+ @PUT
@Path("/datastore/{identifier:.+}")
- @Produces({API+JSON,API+XML})
+ @Produces({Draft01.MediaTypes.DATA+JSON,Draft01.MediaTypes.DATA+XML,
+ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_XML})
public Response updateConfigurationDataLegacy(@PathParam("identifier") String identifier, CompositeNode payload);
}
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
import javax.activation.UnsupportedDataTypeException;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.Node;
-import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.BooleanTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.DecimalTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.EmptyTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.InstanceIdentifierTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.IntegerTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.UnsignedIntegerTypeDefinition;
+import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
+import org.opendaylight.yangtools.yang.data.api.*;
+import org.opendaylight.yangtools.yang.model.api.*;
+import org.opendaylight.yangtools.yang.model.api.type.*;
import com.google.common.base.Preconditions;
import com.google.gson.stream.JsonWriter;
class JsonMapper {
-
+
private final Set<LeafListSchemaNode> foundLeafLists = new HashSet<>();
private final Set<ListSchemaNode> foundLists = new HashSet<>();
-
+
public void write(JsonWriter writer, CompositeNode data, DataNodeContainer schema) throws IOException {
Preconditions.checkNotNull(writer);
Preconditions.checkNotNull(data);
Preconditions.checkNotNull(schema);
writer.beginObject();
-
+
if (schema instanceof ContainerSchemaNode) {
writeContainer(writer, data, (ContainerSchemaNode) schema);
} else if (schema instanceof ListSchemaNode) {
- writeList(writer, data, (ListSchemaNode) schema);
+ writeList(writer, null, data, (ListSchemaNode) schema);
} else {
throw new UnsupportedDataTypeException(
"Schema can be ContainerSchemaNode or ListSchemaNode. Other types are not supported yet.");
}
-
+
writer.endObject();
-
+
foundLeafLists.clear();
foundLists.clear();
}
- private void writeChildrenOfParent(JsonWriter writer, CompositeNode parent, DataNodeContainer parentSchema) throws IOException {
+ private void writeChildrenOfParent(JsonWriter writer, CompositeNode parent, DataNodeContainer parentSchema)
+ throws IOException {
checkNotNull(parent);
checkNotNull(parentSchema);
-
+
for (Node<?> child : parent.getChildren()) {
DataSchemaNode childSchema = findFirstSchemaForNode(child, parentSchema.getChildNodes());
if (childSchema == null) {
throw new UnsupportedDataTypeException("Probably the data node \"" + child.getNodeType().getLocalName()
+ "\" is not conform to schema");
}
-
+
if (childSchema instanceof ContainerSchemaNode) {
Preconditions.checkState(child instanceof CompositeNode,
"Data representation of Container should be CompositeNode - " + child.getNodeType());
Preconditions.checkState(child instanceof CompositeNode,
"Data representation of List should be CompositeNode - " + child.getNodeType());
foundLists.add((ListSchemaNode) childSchema);
- writeList(writer, (CompositeNode) child, (ListSchemaNode) childSchema);
+ writeList(writer, parent, (CompositeNode) child, (ListSchemaNode) childSchema);
}
} else if (childSchema instanceof LeafListSchemaNode) {
if (!foundLeafLists.contains(childSchema)) {
Preconditions.checkState(child instanceof SimpleNode<?>,
"Data representation of LeafList should be SimpleNode - " + child.getNodeType());
foundLeafLists.add((LeafListSchemaNode) childSchema);
- writeLeafList(writer, (SimpleNode<?>) child, (LeafListSchemaNode) childSchema);
+ writeLeafList(writer, parent, (SimpleNode<?>) child, (LeafListSchemaNode) childSchema);
}
} else if (childSchema instanceof LeafSchemaNode) {
Preconditions.checkState(child instanceof SimpleNode<?>,
+ "LeafListSchemaNode, or LeafSchemaNode. Other types are not supported yet.");
}
}
-
+
for (Node<?> child : parent.getChildren()) {
DataSchemaNode childSchema = findFirstSchemaForNode(child, parentSchema.getChildNodes());
if (childSchema instanceof LeafListSchemaNode) {
}
}
}
-
+
private DataSchemaNode findFirstSchemaForNode(Node<?> node, Set<DataSchemaNode> dataSchemaNode) {
for (DataSchemaNode dsn : dataSchemaNode) {
if (node.getNodeType().getLocalName().equals(dsn.getQName().getLocalName())) {
}
return null;
}
-
+
private void writeContainer(JsonWriter writer, CompositeNode node, ContainerSchemaNode schema) throws IOException {
- writer.name(node.getNodeType().getLocalName());
+ writeName(node, schema, writer);
writer.beginObject();
writeChildrenOfParent(writer, node, schema);
writer.endObject();
}
-
- private void writeList(JsonWriter writer, CompositeNode node, ListSchemaNode schema) throws IOException {
- writer.name(node.getNodeType().getLocalName());
- writer.beginArray();
-
- if (node.getParent() != null) {
- CompositeNode parent = node.getParent();
- List<CompositeNode> nodeLists = parent.getCompositesByName(node.getNodeType());
- for (CompositeNode nodeList : nodeLists) {
- writer.beginObject();
- writeChildrenOfParent(writer, nodeList, schema);
- writer.endObject();
- }
- } else {
+
+ private void writeList(JsonWriter writer, CompositeNode nodeParent, CompositeNode node, ListSchemaNode schema)
+ throws IOException {
+ writeName(node, schema, writer);
+ writer.beginArray();
+
+ if (nodeParent != null) {
+ List<CompositeNode> nodeLists = nodeParent.getCompositesByName(node.getNodeType());
+ for (CompositeNode nodeList : nodeLists) {
writer.beginObject();
- writeChildrenOfParent(writer, node, schema);
+ writeChildrenOfParent(writer, nodeList, schema);
writer.endObject();
}
-
- writer.endArray();
- }
-
- private void writeLeafList(JsonWriter writer, SimpleNode<?> node, LeafListSchemaNode schema) throws IOException {
- writer.name(node.getNodeType().getLocalName());
- writer.beginArray();
-
- CompositeNode parent = node.getParent();
- List<SimpleNode<?>> nodeLeafLists = parent.getSimpleNodesByName(node.getNodeType());
- for (SimpleNode<?> nodeLeafList : nodeLeafLists) {
- writeValueOfNodeByType(writer, nodeLeafList, schema.getType());
- }
-
- writer.endArray();
+ } else {
+ writer.beginObject();
+ writeChildrenOfParent(writer, node, schema);
+ writer.endObject();
+ }
+
+ writer.endArray();
+ }
+
+ private void writeLeafList(JsonWriter writer, CompositeNode nodeParent, SimpleNode<?> node,
+ LeafListSchemaNode schema) throws IOException {
+ writeName(node, schema, writer);
+ writer.beginArray();
+
+ List<SimpleNode<?>> nodeLeafLists = nodeParent.getSimpleNodesByName(node.getNodeType());
+ for (SimpleNode<?> nodeLeafList : nodeLeafLists) {
+ writeValueOfNodeByType(writer, nodeLeafList, schema.getType());
+ }
+
+ writer.endArray();
}
-
+
private void writeLeaf(JsonWriter writer, SimpleNode<?> node, LeafSchemaNode schema) throws IOException {
- writer.name(node.getNodeType().getLocalName());
+ writeName(node, schema, writer);
writeValueOfNodeByType(writer, node, schema.getType());
}
-
- private void writeValueOfNodeByType(JsonWriter writer, SimpleNode<?> node, TypeDefinition<?> type) throws IOException {
- if (!(node.getValue() instanceof String)) {
- throw new IllegalStateException("Value in SimpleNode should be type String");
- }
-
- String value = (String) node.getValue();
- // TODO check Leafref, InstanceIdentifierTypeDefinition, IdentityrefTypeDefinition, UnionTypeDefinition
- if (type.getBaseType() != null) {
- writeValueOfNodeByType(writer, node, type.getBaseType());
- } else if (type instanceof InstanceIdentifierTypeDefinition) {
- writer.value(((InstanceIdentifierTypeDefinition) type).getPathStatement().toString());
- } else if (type instanceof DecimalTypeDefinition
- || type instanceof IntegerTypeDefinition
- || type instanceof UnsignedIntegerTypeDefinition) {
+
+ private void writeValueOfNodeByType(JsonWriter writer, SimpleNode<?> node, TypeDefinition<?> type)
+ throws IOException {
+
+ String value = String.valueOf(node.getValue());
+ // TODO check Leafref, InstanceIdentifierTypeDefinition,
+ // IdentityrefTypeDefinition, UnionTypeDefinition
+ TypeDefinition<?> baseType = resolveBaseTypeFrom(type);
+ if (baseType instanceof InstanceIdentifierTypeDefinition) {
+ writer.value(((InstanceIdentifierTypeDefinition) baseType).getPathStatement().toString());
+ } else if (baseType instanceof UnionTypeDefinition) {
+ processTypeIsUnionType(writer, (UnionTypeDefinition) baseType, value);
+ } else if (baseType instanceof DecimalTypeDefinition || baseType instanceof IntegerTypeDefinition
+ || baseType instanceof UnsignedIntegerTypeDefinition) {
+ writer.value(new NumberForJsonWriter(value));
+ } else if (baseType instanceof BooleanTypeDefinition) {
+ writer.value(Boolean.parseBoolean(value));
+ } else if (baseType instanceof EmptyTypeDefinition) {
+ writeEmptyDataTypeToJson(writer);
+ } else {
+ writer.value(value.equals("null") ? "" : value);
+ }
+ }
+
+ private void processTypeIsUnionType(JsonWriter writer, UnionTypeDefinition unionType, String value)
+ throws IOException {
+ if (value == null) {
+ writeEmptyDataTypeToJson(writer);
+ } else if ((isNumber(value))
+ && containsType(unionType, UnsignedIntegerTypeDefinition.class, IntegerTypeDefinition.class,
+ DecimalTypeDefinition.class)) {
writer.value(new NumberForJsonWriter(value));
- } else if (type instanceof BooleanTypeDefinition) {
+ } else if (isBoolean(value) && containsType(unionType, BooleanTypeDefinition.class)) {
writer.value(Boolean.parseBoolean(value));
- } else if (type instanceof EmptyTypeDefinition) {
- writer.beginArray();
- writer.nullValue();
- writer.endArray();
} else {
- writer.value(value != null ? value : "");
+ writer.value(value);
}
}
-
+
+ private boolean isBoolean(String value) {
+ if (value.equals("true") || value.equals("false")) {
+ return true;
+ }
+ return false;
+ }
+
+ private void writeEmptyDataTypeToJson(JsonWriter writer) throws IOException {
+ writer.beginArray();
+ writer.nullValue();
+ writer.endArray();
+ }
+
+ private boolean isNumber(String value) {
+ try {
+ Double.valueOf(value);
+ } catch (NumberFormatException e) {
+ return false;
+ }
+ return true;
+ }
+
+ private boolean containsType(UnionTypeDefinition unionType, Class<?>... searchedTypes) {
+ List<TypeDefinition<?>> allUnionSubtypes = resolveAllUnionSubtypesFrom(unionType);
+
+ for (TypeDefinition<?> unionSubtype : allUnionSubtypes) {
+ for (Class<?> searchedType : searchedTypes) {
+ if (searchedType.isInstance(unionSubtype)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ private List<TypeDefinition<?>> resolveAllUnionSubtypesFrom(UnionTypeDefinition inputType) {
+ List<TypeDefinition<?>> result = new ArrayList<>();
+ for (TypeDefinition<?> subtype : inputType.getTypes()) {
+ TypeDefinition<?> resolvedSubtype = subtype;
+
+ resolvedSubtype = resolveBaseTypeFrom(subtype);
+
+ if (resolvedSubtype instanceof UnionTypeDefinition) {
+ List<TypeDefinition<?>> subtypesFromRecursion = resolveAllUnionSubtypesFrom((UnionTypeDefinition) resolvedSubtype);
+ result.addAll(subtypesFromRecursion);
+ } else {
+ result.add(resolvedSubtype);
+ }
+ }
+
+ return result;
+ }
+
+ private TypeDefinition<?> resolveBaseTypeFrom(TypeDefinition<?> type) {
+ return type.getBaseType() != null ? resolveBaseTypeFrom(type.getBaseType()) : type;
+ }
+
+ private void writeName(Node<?> node, DataSchemaNode schema, JsonWriter writer) throws IOException {
+ String nameForOutput = node.getNodeType().getLocalName();
+ if (schema.isAugmenting()) {
+ ControllerContext contContext = ControllerContext.getInstance();
+ CharSequence moduleName;
+ moduleName = contContext.toRestconfIdentifier(schema.getQName());
+ if (moduleName != null) {
+ nameForOutput = moduleName.toString();
+ }
+ }
+ writer.name(nameForOutput);
+ }
+
private static final class NumberForJsonWriter extends Number {
-
+
private static final long serialVersionUID = -3147729419814417666L;
private final String value;
-
+
public NumberForJsonWriter(String value) {
this.value = value;
}
public String toString() {
return value;
}
-
+
}
}
package org.opendaylight.controller.sal.rest.impl;
-import static org.opendaylight.controller.sal.restconf.impl.MediaTypes.API;
-
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.Provider;
+import org.opendaylight.controller.sal.rest.api.Draft01;
+import org.opendaylight.controller.sal.rest.api.Draft02;
import org.opendaylight.controller.sal.rest.api.RestconfService;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
@Provider
-@Consumes({API+RestconfService.JSON})
+@Consumes({ Draft01.MediaTypes.DATA + RestconfService.JSON, Draft02.MediaTypes.DATA + RestconfService.JSON,
+ MediaType.APPLICATION_JSON })
public enum JsonToCompositeNodeProvider implements MessageBodyReader<CompositeNode> {
INSTANCE;
-
+
@Override
public boolean isReadable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return true;
try {
return jsonReader.read(entityStream);
} catch (UnsupportedFormatException e) {
- throw new WebApplicationException(e,Response.status(Response.Status.BAD_REQUEST)
- .entity(e.getMessage()).build());
+ throw new WebApplicationException(e, Response.status(Response.Status.BAD_REQUEST).entity(e.getMessage())
+ .build());
}
}
package org.opendaylight.controller.sal.rest.impl;
-import static org.opendaylight.controller.sal.restconf.impl.MediaTypes.API;
-
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
+import org.opendaylight.controller.sal.rest.api.Draft01;
+import org.opendaylight.controller.sal.rest.api.Draft02;
import org.opendaylight.controller.sal.rest.api.RestconfService;
import org.opendaylight.controller.sal.restconf.impl.StructuredData;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import com.google.gson.stream.JsonWriter;
@Provider
-@Produces({API+RestconfService.JSON})
+@Produces({ Draft01.MediaTypes.DATA + RestconfService.JSON, Draft02.MediaTypes.DATA + RestconfService.JSON,
+ MediaType.APPLICATION_JSON })
public enum StructuredDataToJsonProvider implements MessageBodyWriter<StructuredData> {
INSTANCE;
-
+
@Override
public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return true;
package org.opendaylight.controller.sal.rest.impl;
-import static org.opendaylight.controller.sal.restconf.impl.MediaTypes.API;
-
import java.io.IOException;
import java.io.OutputStream;
import java.lang.annotation.Annotation;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
+import org.opendaylight.controller.sal.rest.api.Draft01;
+import org.opendaylight.controller.sal.rest.api.Draft02;
import org.opendaylight.controller.sal.rest.api.RestconfService;
+import org.opendaylight.controller.sal.restconf.impl.ResponseException;
import org.opendaylight.controller.sal.restconf.impl.StructuredData;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.impl.NodeUtils;
import org.w3c.dom.Document;
@Provider
-@Produces({API+RestconfService.XML})
+@Produces({ Draft01.MediaTypes.DATA + RestconfService.XML, Draft02.MediaTypes.DATA + RestconfService.XML,
+ MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public enum StructuredDataToXmlProvider implements MessageBodyWriter<StructuredData> {
INSTANCE;
-
+
private final static Logger logger = LoggerFactory.getLogger(StructuredDataToXmlProvider.class);
@Override
throws IOException, WebApplicationException {
CompositeNode data = t.getData();
if (data == null) {
- throw new WebApplicationException(Response.status(Response.Status.NOT_FOUND).build());
+ throw new ResponseException(Response.Status.NOT_FOUND, "No data exists.");
}
-
+
Document domTree = NodeUtils.buildShadowDomTree(data);
try {
TransformerFactory tf = TransformerFactory.newInstance();
transformer.transform(new DOMSource(domTree), new StreamResult(entityStream));
} catch (TransformerException e) {
logger.error("Error during translation of Document to OutputStream", e);
- throw new WebApplicationException(Response.status(Response.Status.INTERNAL_SERVER_ERROR).build());
+ throw new ResponseException(Response.Status.INTERNAL_SERVER_ERROR, e.getMessage());
}
}
import org.opendaylight.controller.sal.restconf.impl.SimpleNodeWrapper;
public class XmlReader {
-
+
private final static XMLInputFactory xmlInputFactory = XMLInputFactory.newInstance();
private XMLEventReader eventReader;
public CompositeNodeWrapper read(InputStream entityStream) throws XMLStreamException, UnsupportedFormatException {
eventReader = xmlInputFactory.createXMLEventReader(entityStream);
-
+
if (eventReader.hasNext()) {
XMLEvent element = eventReader.peek();
if (element.isStartDocument()) {
if (eventReader.hasNext() && !isCompositeNodeEvent(eventReader.peek())) {
throw new UnsupportedFormatException("Root element of XML has to be composite element.");
}
-
+
final Stack<NodeWrapper<?>> processingQueue = new Stack<>();
CompositeNodeWrapper root = null;
NodeWrapper<?> element = null;
element = processingQueue.pop();
}
}
-
+
if (!root.getLocalName().equals(element.getLocalName())) {
throw new UnsupportedFormatException("XML should contain only one root element");
}
-
+
return root;
}
-
+
private boolean isSimpleNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
}
return false;
}
-
+
private boolean isCompositeNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
}
return false;
}
-
- private SimpleNodeWrapper resolveSimpleNodeFromStartElement(final StartElement startElement) throws XMLStreamException {
+
+ private SimpleNodeWrapper resolveSimpleNodeFromStartElement(final StartElement startElement)
+ throws XMLStreamException {
checkArgument(startElement != null, "Start Element cannot be NULL!");
String data = null;
data = innerEvent.asCharacters().getData();
}
} else if (innerEvent.isEndElement()) {
- data = "";
+ if (startElement.getLocation().getCharacterOffset() == innerEvent.getLocation().getCharacterOffset()) {
+ data = null;
+ } else {
+ data = "";
+ }
}
}
-
+
return new SimpleNodeWrapper(getNamespaceFrom(startElement), getLocalNameFrom(startElement), data);
}
-
+
private CompositeNodeWrapper resolveCompositeNodeFromStartElement(final StartElement startElement) {
checkArgument(startElement != null, "Start Element cannot be NULL!");
return new CompositeNodeWrapper(getNamespaceFrom(startElement), getLocalNameFrom(startElement));
}
-
+
private String getLocalNameFrom(StartElement startElement) {
return startElement.getName().getLocalPart();
}
-
+
private URI getNamespaceFrom(StartElement startElement) {
String namespaceURI = startElement.getName().getNamespaceURI();
return namespaceURI.isEmpty() ? null : URI.create(namespaceURI);
}
-
+
}
package org.opendaylight.controller.sal.rest.impl;
-import static org.opendaylight.controller.sal.restconf.impl.MediaTypes.API;
-
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import javax.ws.rs.ext.Provider;
import javax.xml.stream.XMLStreamException;
+import org.opendaylight.controller.sal.rest.api.Draft01;
+import org.opendaylight.controller.sal.rest.api.Draft02;
import org.opendaylight.controller.sal.rest.api.RestconfService;
+import org.opendaylight.controller.sal.restconf.impl.ResponseException;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
@Provider
-@Consumes({ API + RestconfService.XML })
+@Consumes({ Draft01.MediaTypes.DATA + RestconfService.XML, Draft02.MediaTypes.DATA + RestconfService.XML,
+ MediaType.APPLICATION_XML, MediaType.TEXT_XML })
public enum XmlToCompositeNodeProvider implements MessageBodyReader<CompositeNode> {
INSTANCE;
try {
return xmlReader.read(entityStream);
} catch (XMLStreamException | UnsupportedFormatException e) {
- throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST).entity(e.getMessage())
- .build());
+ throw new ResponseException(Response.Status.BAD_REQUEST, e.getMessage());
}
}
private DataBrokerService dataService;
private new() {
- if (INSTANCE != null) {
+ if (INSTANCE !== null) {
throw new IllegalStateException("Already instantiated");
}
}
}
private def void checkPreconditions() {
- if (context == null || dataService == null) {
- throw new WebApplicationException(Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .entity(RestconfProvider::NOT_INITALIZED_MSG).build())
+ if (context === null || dataService === null) {
+ throw new ResponseException(Response.Status.SERVICE_UNAVAILABLE, RestconfProvider::NOT_INITALIZED_MSG)
}
}
import java.util.List
import java.util.Map
import java.util.concurrent.ConcurrentHashMap
-import javax.ws.rs.WebApplicationException
import javax.ws.rs.core.Response
import org.opendaylight.controller.sal.core.api.model.SchemaServiceListener
import org.opendaylight.controller.sal.rest.impl.RestconfProvider
private new() {
- if (INSTANCE != null) {
+ if (INSTANCE !== null) {
throw new IllegalStateException("Already instantiated");
}
}
}
private def void checkPreconditions() {
- if (schemas == null) {
- throw new WebApplicationException(Response.status(Response.Status.SERVICE_UNAVAILABLE)
- .entity(RestconfProvider::NOT_INITALIZED_MSG).build())
+ if (schemas === null) {
+ throw new ResponseException(Response.Status.SERVICE_UNAVAILABLE, RestconfProvider::NOT_INITALIZED_MSG)
}
}
pathArgs.remove(0)
}
val schemaNode = ret.collectPathArguments(pathArgs, restconfInstance.findModule);
- if (schemaNode == null) {
+ if (schemaNode === null) {
return null
}
new InstanceIdWithSchemaNode(ret.toInstance, schemaNode)
private def getLatestModule(SchemaContext schema, String moduleName) {
checkNotNull(schema)
- checkArgument(moduleName != null && !moduleName.empty)
+ checkArgument(moduleName !== null && !moduleName.empty)
val modules = schema.modules.filter[m|m.name == moduleName]
var latestModule = modules.head
for (module : modules) {
def CharSequence toRestconfIdentifier(QName qname) {
checkPreconditions
var module = uriToModuleName.get(qname.namespace)
- if (module == null) {
+ if (module === null) {
val moduleSchema = schemas.findModuleByNamespaceAndRevision(qname.namespace, qname.revision);
- if(moduleSchema == null) throw new IllegalArgumentException()
+ if(moduleSchema === null) throw new IllegalArgumentException()
uriToModuleName.put(qname.namespace, moduleSchema.name)
module = moduleSchema.name;
}
}
private def toUriString(Object object) {
- if(object == null) return "";
+ if(object === null) return "";
return URLEncoder.encode(object.toString)
}
private def DataSchemaNode collectPathArguments(InstanceIdentifierBuilder builder, List<String> strings,
DataNodeContainer parentNode) {
checkNotNull(strings)
+ if (parentNode === null) {
+ return null;
+ }
if (strings.empty) {
return parentNode as DataSchemaNode;
}
val nodeName = nodeRef.toNodeName();
val targetNode = parentNode.getDataChildByName(nodeName);
- if (targetNode == null) {
+ if (targetNode === null) {
val children = parentNode.childNodes
for (child : children) {
if (child instanceof ChoiceNode) {
val choice = child as ChoiceNode
for (caze : choice.cases) {
val result = builder.collectPathArguments(strings, caze as DataNodeContainer);
- if (result != null)
+ if (result !== null)
return result
}
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.restconf.impl;
-
-public class MediaTypes {
- public static final String API = "application/vnd.yang.api";
- public static final String DATASTORE = "application/vnd.yang.datastore";
- public static final String DATA = "application/vnd.yang.data";
- public static final String EVENT = "application/vnd.yang.event";
- public static final String OPERATION = "application/vnd.yang.operation";
- public static final String PATCH = "application/vnd.yang.patch";
-}
--- /dev/null
+package org.opendaylight.controller.sal.restconf.impl;
+
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+/**
+ * A {@link WebApplicationException} whose response carries a plain-text body,
+ * so REST clients receive a human-readable message together with the HTTP
+ * status code.
+ */
+public class ResponseException extends WebApplicationException {
+
+    private static final long serialVersionUID = -5320114450593021655L;
+
+    /**
+     * @param status HTTP status to report
+     * @param msg plain-text message used as the response entity
+     */
+    public ResponseException(Status status, String msg) {
+        super(Response.status(status).type(MediaType.TEXT_PLAIN_TYPE).entity(msg).build());
+    }
+}
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode
import org.opendaylight.controller.md.sal.common.api.TransactionStatus
+import javax.ws.rs.WebApplicationException
class RestconfImpl implements RestconfService {
extension ControllerContext controllerContext
private new() {
- if (INSTANCE != null) {
+ if (INSTANCE !== null) {
throw new IllegalStateException("Already instantiated");
}
}
}
override readData(String identifier) {
- val instanceIdentifierWithSchemaNode = identifier.toInstanceIdentifier
+ val instanceIdentifierWithSchemaNode = identifier.resolveInstanceIdentifier
val data = broker.readOperationalData(instanceIdentifierWithSchemaNode.getInstanceIdentifier);
return new StructuredData(data, instanceIdentifierWithSchemaNode.schemaNode)
}
override createConfigurationData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
val status = broker.commitConfigurationDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
switch status.result {
}
override updateConfigurationData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
val status = broker.commitConfigurationDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
switch status.result {
}
override readConfigurationData(String identifier) {
- val instanceIdentifierWithSchemaNode = identifier.toInstanceIdentifier
+ val instanceIdentifierWithSchemaNode = identifier.resolveInstanceIdentifier
val data = broker.readOperationalData(instanceIdentifierWithSchemaNode.getInstanceIdentifier);
return new StructuredData(data, instanceIdentifierWithSchemaNode.schemaNode)
}
override readOperationalData(String identifier) {
- val instanceIdentifierWithSchemaNode = identifier.toInstanceIdentifier
+ val instanceIdentifierWithSchemaNode = identifier.resolveInstanceIdentifier
val data = broker.readOperationalData(instanceIdentifierWithSchemaNode.getInstanceIdentifier);
return new StructuredData(data, instanceIdentifierWithSchemaNode.schemaNode)
}
}
override createOperationalData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
val status = broker.commitOperationalDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
switch status.result {
}
override updateOperationalData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
val status = broker.commitOperationalDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
switch status.result {
}
}
+ /**
+  * Resolves a restconf URI path to an InstanceIdWithSchemaNode.
+  * A null result from toInstanceIdentifier (unparsable path) is translated
+  * into an HTTP 400 (BAD_REQUEST) via ResponseException instead of leaking
+  * a null to callers.
+  */
+ private def InstanceIdWithSchemaNode resolveInstanceIdentifier(String identifier) {
+ val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ if (identifierWithSchemaNode === null) {
+ throw new ResponseException(Response.Status.BAD_REQUEST, "URI has bad format");
+ }
+ return identifierWithSchemaNode
+ }
+
private def CompositeNode resolveNodeNamespaceBySchema(CompositeNode node, DataSchemaNode schema) {
if (node instanceof CompositeNodeWrapper) {
addNamespaceToNodeFromSchemaRecursively(node as CompositeNodeWrapper, schema)
}
private def void addNamespaceToNodeFromSchemaRecursively(NodeWrapper<?> nodeBuilder, DataSchemaNode schema) {
- if (nodeBuilder.namespace == null) {
+ if (nodeBuilder.namespace === null) {
nodeBuilder.namespace = schema.QName.namespace
}
if (nodeBuilder instanceof CompositeNodeWrapper) {
package org.opendaylight.controller.sal.restconf.impl;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.*;
public class StructuredData {
-
+
private final CompositeNode data;
private final DataSchemaNode schema;
-
+
public StructuredData(CompositeNode data, DataSchemaNode schema) {
this.data = data;
this.schema = schema;
public DataSchemaNode getSchema() {
return schema;
}
-
}
}
@Test
- public void nullArrayToCompositeNodeWithNullValueTest() {
+ public void nullArrayToSimpleNodeWithNullValueTest() {
CompositeNode compositeNode = compositeContainerFromJson("/json-to-composite-node/array-with-null.json", true);
assertNotNull(compositeNode);
assertEquals("cont", compositeNode.getNodeType().getLocalName());
import org.junit.*;
import org.opendaylight.controller.sal.rest.impl.XmlToCompositeNodeProvider;
-import org.opendaylight.controller.sal.restconf.impl.CompositeNodeWrapper;
+import org.opendaylight.controller.sal.restconf.impl.*;
import org.opendaylight.yangtools.yang.data.api.*;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.slf4j.*;
String nameSpace = "data:container:yang";
assertEquals(nameSpace, compNode.getNodeType().getNamespace().toString());
+ verifyNullAndEmptyStringSingleNode(compNode, nameSpace);
verifyCommonPartAOfXml(compNode, "", nameSpace);
}
+ /**
+  * Asserts that the parsed container "cont" contains a leaf "lf2" with an
+  * empty-string value and a leaf "lf3" with a null value, and that every
+  * inspected child carries the expected namespace.
+  */
+ private void verifyNullAndEmptyStringSingleNode(CompositeNode compNode, String nameSpace) {
+ assertEquals("cont", compNode.getNodeType().getLocalName());
+
+ SimpleNode<?> lf2 = null;
+ SimpleNode<?> lf3 = null;
+ // Bitmask of located leaves: bit 0 = lf3 found, bit 1 = lf2 found.
+ int found = 0;
+ for (Node<?> child : compNode.getChildren()) {
+ if (found == 0x3)
+ break; // both leaves located; stop scanning
+ if (child instanceof SimpleNode<?>) {
+ SimpleNode<?> childSimple = (SimpleNode<?>) child;
+ if (childSimple.getNodeType().getLocalName().equals("lf3")) {
+ lf3 = childSimple;
+ found = found | (1 << 0);
+ } else if (childSimple.getNodeType().getLocalName().equals("lf2")) {
+ lf2 = childSimple;
+ found = found | (1 << 1);
+ }
+ }
+ // Every child seen before both leaves are found must be in the namespace.
+ assertEquals(nameSpace, child.getNodeType().getNamespace().toString());
+ }
+
+ assertEquals("", lf2.getValue());
+ assertEquals(null, lf3.getValue());
+ }
+
@Test
public void testXmlDataList() {
CompositeNode compNode = compositeContainerFromXml("/xml-to-composite-node/data-list.xml", false);
import java.util.concurrent.Future;
import javax.ws.rs.WebApplicationException;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.parsers.*;
import javax.xml.stream.XMLStreamException;
import javax.xml.transform.*;
import javax.xml.transform.dom.DOMSource;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.rest.impl.StructuredDataToJsonProvider;
import org.opendaylight.controller.sal.restconf.impl.*;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.*;
import org.opendaylight.yangtools.yang.data.api.*;
import org.opendaylight.yangtools.yang.data.impl.XmlTreeBuilder;
import org.opendaylight.yangtools.yang.model.api.*;
}
return (CompositeNode) dataTree;
}
-
+
public static Document loadDocumentFrom(InputStream inputStream) {
try {
DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance();
}
static String convertCompositeNodeDataAndYangToJson(CompositeNode compositeNode, String yangPath, String outputPath) {
+ return convertCompositeNodeDataAndYangToJson(compositeNode, yangPath, outputPath, null, null);
+ }
+
+ static String convertCompositeNodeDataAndYangToJson(CompositeNode compositeNode, String yangPath,
+ String outputPath, String searchedModuleName, String searchedDataSchemaName) {
String jsonResult = null;
Set<Module> modules = null;
}
assertNotNull("modules can't be null.", modules);
+ Module module = null;
+ if (searchedModuleName != null) {
+ for (Module m : modules) {
+ if (m.getName().equals(searchedModuleName)) {
+ module = m;
+ break;
+ }
+ }
+ } else if (modules.size() == 1) {
+ module = modules.iterator().next();
+ }
+ assertNotNull("Module is missing", module);
+
assertNotNull("Composite node can't be null", compositeNode);
StructuredDataToJsonProvider structuredDataToJsonProvider = StructuredDataToJsonProvider.INSTANCE;
- for (Module module : modules) {
- ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream();
- for (DataSchemaNode dataSchemaNode : module.getChildNodes()) {
- StructuredData structuredData = new StructuredData(compositeNode, dataSchemaNode);
- try {
- structuredDataToJsonProvider.writeTo(structuredData, null, null, null, null, null, byteArrayOS);
- } catch (WebApplicationException | IOException e) {
- e.printStackTrace();
+ ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream();
+ DataSchemaNode dataSchemaNode = null;
+ if (searchedDataSchemaName != null) {
+ for (DataSchemaNode dsn : module.getChildNodes()) {
+ if (dsn.getQName().getLocalName().equals(searchedDataSchemaName)) {
+ dataSchemaNode = dsn;
}
- assertFalse(
- "Returning JSON string can't be empty for node " + dataSchemaNode.getQName().getLocalName(),
- byteArrayOS.toString().isEmpty());
- }
- jsonResult = byteArrayOS.toString();
- try {
- outputToFile(byteArrayOS, outputPath);
- } catch (IOException e) {
- System.out.println("Output file wasn't cloased sucessfuly.");
}
+ } else if (module.getChildNodes().size() == 1) {
+ dataSchemaNode = module.getChildNodes().iterator().next();
+ }
+ assertNotNull(dataSchemaNode);
+ // SchemaContextUtil.
+
+ ControllerContext controllerContext = ControllerContext.getInstance();
+ controllerContext.setSchemas(loadSchemaContext(modules));
+ StructuredData structuredData = new StructuredData(compositeNode, dataSchemaNode);
+ try {
+ structuredDataToJsonProvider.writeTo(structuredData, null, null, null, null, null, byteArrayOS);
+ } catch (WebApplicationException | IOException e) {
+ e.printStackTrace();
+ }
+ assertFalse("Returning JSON string can't be empty for node " + dataSchemaNode.getQName().getLocalName(),
+ byteArrayOS.toString().isEmpty());
+ jsonResult = byteArrayOS.toString();
+ try {
+ outputToFile(byteArrayOS, outputPath);
+ } catch (IOException e) {
+ System.out.println("Output file wasn't cloased sucessfuly.");
}
+
return jsonResult;
}
RpcResult<TransactionStatus> rpcResult = DummyRpcResult.builder().result(TransactionStatus.COMMITED).build();
Future<RpcResult<TransactionStatus>> future = DummyFuture.builder().rpcResult(rpcResult).build();
when(controllerContext.toInstanceIdentifier(any(String.class))).thenReturn(instIdAndSchema);
- when(broker.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(future);
+ when(broker.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(
+ future);
restconf.setControllerContext(controllerContext);
restconf.setBroker(broker);
if (modules.size() < 1) {
return null;
}
-
- Module moduleRes = null;
+
+ Module moduleRes = null;
if (modules.size() > 1) {
if (moduleName == null) {
return null;
} else {
- for (Module module: modules) {
+ for (Module module : modules) {
if (module.getName().equals(moduleName)) {
- moduleRes = module;
+ moduleRes = module;
}
}
if (moduleRes == null) {
} else {
moduleRes = modules.iterator().next();
}
-
+
if (moduleRes.getChildNodes() == null) {
return null;
}
@Test
public void simpleYangDataTest() {
String jsonOutput;
- // jsonOutput =
- // TestUtils.readJsonFromFile("/yang-to-json-conversion/simple-yang-types/xml/awaited_output.json",
- // false);
-
jsonOutput = TestUtils.convertCompositeNodeDataAndYangToJson(
TestUtils.loadCompositeNode("/yang-to-json-conversion/simple-data-types/xml/data.xml"),
"/yang-to-json-conversion/simple-data-types", "/yang-to-json-conversion/simple-data-types/xml");
// boolean lfref1Checked = false;
boolean lfemptyChecked = false;
boolean lfstr1Checked = false;
-
+
while (jReader.hasNext()) {
String keyName = jReader.nextName();
JsonToken peek = null;
jReader.nextNull();
jReader.endArray();
lfemptyChecked = true;
- // TODO: test will be implemented when functionality will be
- // implemented
- } else if (keyName.equals("lflstunion")) {
- jReader.skipValue();
+ } else if (keyName.startsWith("lfunion")) {
+ checkLfUnion(jReader, keyName, peek);
} else {
assertTrue("Key " + keyName + " doesn't exists in yang file.", false);
}
jReader.endObject();
}
+
+ /**
+  * Asserts the JSON token type produced for each lfunion* leaf and consumes
+  * its value from the reader. The expected token (NUMBER, STRING or BOOLEAN)
+  * reflects which union member type the serializer chose for the XML input.
+  */
+ private void checkLfUnion(JsonReader jReader, String keyName, JsonToken peek) throws IOException {
+ if (keyName.equals("lfunion1")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.NUMBER, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion2")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.NUMBER, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion3")) {
+ // NOTE(review): asserts STRING but consumes via nextInt() — relies on
+ // lenient numeric parsing of the string token; confirm intended.
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextInt();
+ } else if (keyName.equals("lfunion4")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.BOOLEAN, peek);
+ jReader.nextBoolean();
+ } else if (keyName.equals("lfunion5")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion6")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion7")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion8")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion9")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion10")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion11")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.NUMBER, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion12")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.BOOLEAN, peek);
+ jReader.nextBoolean();
+ }
+ }
}
--- /dev/null
+package org.opendaylight.controller.sal.restconf.impl.test;
+
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Test;
+
+public class ToJsonWithAugmentTest {
+
+ /**
+  * Verifies that nodes added via YANG augment statements (leaf, container,
+  * list and leaf-list augmenting /yang:cont) are serialized to JSON with
+  * the augmenting module's name as prefix, e.g. "augment-leaf:lf2".
+  */
+ @Test
+ public void augmentedElementsToJson() {
+ String jsonOutput = TestUtils.convertCompositeNodeDataAndYangToJson(
+ TestUtils.loadCompositeNode("/yang-to-json-conversion/augmentation/xml/data.xml"),
+ "/yang-to-json-conversion/augmentation", "/yang-to-json-conversion/augmentation/xml", "yang", "cont");
+
+ assertTrue(jsonOutput.contains("\"augment-leaf:lf2\": \"lf2\""));
+ assertTrue(jsonOutput.contains("\"augment-container:cont1\": {"));
+ assertTrue(jsonOutput.contains("\"augment-container:lf11\": \"lf11\""));
+ assertTrue(jsonOutput.contains("\"augment-list:lst1\": ["));
+ assertTrue(jsonOutput.contains("\"augment-list:lf11\": \"lf1_1\""));
+ assertTrue(jsonOutput.contains("\"augment-list:lf11\": \"lf1_2\""));
+ assertTrue(jsonOutput.contains("\"augment-leaflist:lflst1\": ["));
+ }
+}
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.sal.rest.api.Draft01;
import org.opendaylight.controller.sal.rest.api.RestconfService;
import org.opendaylight.controller.sal.rest.impl.StructuredDataToXmlProvider;
import org.opendaylight.controller.sal.rest.impl.XmlToCompositeNodeProvider;
import org.opendaylight.controller.sal.restconf.impl.BrokerFacade;
import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
-import org.opendaylight.controller.sal.restconf.impl.MediaTypes;
import org.opendaylight.controller.sal.restconf.impl.RestconfImpl;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
private static ControllerContext controllerContext;
private static BrokerFacade brokerFacade;
private static RestconfImpl restconfImpl;
- private static final MediaType MEDIA_TYPE = new MediaType("application", "vnd.yang.api+xml");
+ private static final MediaType MEDIA_TYPE = new MediaType("application", "vnd.yang.data+xml");
+ private static final MediaType MEDIA_TYPE_DRAFT02 = new MediaType("application", "yang.data+xml");
@BeforeClass
public static void init() throws FileNotFoundException {
public void testBadFormatXmlToCompositeNodeProvider() throws UnsupportedEncodingException, URISyntaxException {
String uri = createUri("/operations/", "ietf-interfaces:interfaces/interface/eth0");
- Response response = target(uri).request(MediaTypes.API + RestconfService.XML).post(
+ Response response = target(uri).request(Draft01.MediaTypes.DATA + RestconfService.XML).post(
Entity.entity("<SimpleNode/>", MEDIA_TYPE));
assertEquals(400, response.getStatus());
- response = target(uri).request(MediaTypes.API + RestconfService.XML).post(
+ response = target(uri).request(Draft01.MediaTypes.DATA + RestconfService.XML).post(
Entity.entity("<SimpleNode>", MEDIA_TYPE));
assertEquals(400, response.getStatus());
}
when(brokerFacade.readOperationalData(any(InstanceIdentifier.class))).thenReturn(null);
- Response response = target(uri).request(MediaTypes.API+RestconfService.XML).get();
+ Response response = target(uri).request(Draft01.MediaTypes.DATA+RestconfService.XML).get();
assertEquals(404, response.getStatus());
}
+ @Test
+ public void testXmlToCompositeNode400() throws UnsupportedEncodingException, URISyntaxException {
+ String uri = createUri("/datastore/", "simple-nodes:user/name");
+
+ when(brokerFacade.readOperationalData(any(InstanceIdentifier.class))).thenReturn(null);
+
+ Response response = target(uri).request(Draft01.MediaTypes.DATA+RestconfService.XML).get();
+ assertEquals(400, response.getStatus());
+ }
+
@Test
public void testRpcResultCommitedToStatusCodes() throws UnsupportedEncodingException {
InputStream xmlStream = RestconfImplTest.class.getResourceAsStream("/parts/ietf-interfaces_interfaces.xml");
String xml = TestUtils.getDocumentInPrintableForm(TestUtils.loadDocumentFrom(xmlStream));
- Entity<String> entity = Entity.entity(xml, MEDIA_TYPE);
+ Entity<String> entity = Entity.entity(xml, MEDIA_TYPE_DRAFT02);
RpcResult<TransactionStatus> rpcResult = DummyRpcResult.builder().result(TransactionStatus.COMMITED).build();
Future<RpcResult<TransactionStatus>> dummyFuture = DummyFuture.builder().rpcResult(rpcResult).build();
when(brokerFacade.commitOperationalDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
when(brokerFacade.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
String uri = createUri("/config/", "ietf-interfaces:interfaces/interface/eth0");
- Response response = target(uri).request(MEDIA_TYPE).put(entity);
- assertEquals(200, response.getStatus());
- response = target(uri).request(MEDIA_TYPE).post(entity);
+ Response response = target(uri).request(MEDIA_TYPE_DRAFT02).put(entity);
assertEquals(204, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE_DRAFT02).post(entity);
+ assertEquals(200, response.getStatus());
uri = createUri("/operational/", "ietf-interfaces:interfaces/interface/eth0");
- response = target(uri).request(MEDIA_TYPE).put(entity);
- assertEquals(200, response.getStatus());
- response = target(uri).request(MEDIA_TYPE).post(entity);
+ response = target(uri).request(MEDIA_TYPE_DRAFT02).put(entity);
assertEquals(204, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE_DRAFT02).post(entity);
+ assertEquals(200, response.getStatus());
uri = createUri("/datastore/", "ietf-interfaces:interfaces/interface/eth0");
response = target(uri).request(MEDIA_TYPE).put(entity);
- assertEquals(200, response.getStatus());
- response = target(uri).request(MEDIA_TYPE).post(entity);
assertEquals(204, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE).post(entity);
+ assertEquals(200, response.getStatus());
}
@Test
public void testRpcResultOtherToStatusCodes() throws UnsupportedEncodingException {
InputStream xmlStream = RestconfImplTest.class.getResourceAsStream("/parts/ietf-interfaces_interfaces.xml");
String xml = TestUtils.getDocumentInPrintableForm(TestUtils.loadDocumentFrom(xmlStream));
- Entity<String> entity = Entity.entity(xml, MEDIA_TYPE);
+ Entity<String> entity = Entity.entity(xml, MEDIA_TYPE_DRAFT02);
RpcResult<TransactionStatus> rpcResult = DummyRpcResult.builder().result(TransactionStatus.FAILED).build();
Future<RpcResult<TransactionStatus>> dummyFuture = DummyFuture.builder().rpcResult(rpcResult).build();
when(brokerFacade.commitOperationalDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
when(brokerFacade.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
String uri = createUri("/config/", "ietf-interfaces:interfaces/interface/eth0");
- Response response = target(uri).request(MEDIA_TYPE).put(entity);
+ Response response = target(uri).request(MEDIA_TYPE_DRAFT02).put(entity);
assertEquals(500, response.getStatus());
- response = target(uri).request(MEDIA_TYPE).post(entity);
+ response = target(uri).request(MEDIA_TYPE_DRAFT02).post(entity);
assertEquals(500, response.getStatus());
uri = createUri("/operational/", "ietf-interfaces:interfaces/interface/eth0");
- response = target(uri).request(MEDIA_TYPE).put(entity);
+ response = target(uri).request(MEDIA_TYPE_DRAFT02).put(entity);
assertEquals(500, response.getStatus());
- response = target(uri).request(MEDIA_TYPE).post(entity);
+ response = target(uri).request(MEDIA_TYPE_DRAFT02).post(entity);
assertEquals(500, response.getStatus());
uri = createUri("/datastore/", "ietf-interfaces:interfaces/interface/eth0");
namespace "data:container:yang";
prefix "dtconyg";
- revision 2013-11-19 {
+ revision 2013-11-19 {
}
container cont {
leaf lf1 {
type string;
}
+
+ leaf lf2 {
+ type string;
+ }
+
+ leaf lf3 {
+ type empty;
+ }
+
leaf-list lflst1 {
type string;
}
<cont>
<lf1>str0</lf1>
+ <lf2></lf2>
+ <lf3/>
<lflst1>121</lflst1>
<lflst1>131</lflst1>
<lflst1>str1</lflst1>
--- /dev/null
+// Test model: augments /yng:cont (module "yang", rev 2013-11-26)
+// with container cont1 containing leaf lf11.
+module augment-container {
+    namespace "ns:augment:container";
+    prefix "augcont";
+
+
+    import yang {prefix yng; revision-date 2013-11-26;}
+
+
+    revision "2013-11-26" {
+    }
+
+    augment "/yng:cont" {
+        container cont1 {
+            leaf lf11 {
+                type string;
+            }
+        }
+    }
+
+
+
+}
\ No newline at end of file
--- /dev/null
+// Test model: augments /yng:cont (module "yang", rev 2013-11-26)
+// with string leaf lf2.
+module augment-leaf {
+    namespace "ns:augment:leaf";
+    prefix "auglf";
+
+
+    import yang {prefix yng; revision-date 2013-11-26;}
+
+
+    revision "2013-11-26" {
+    }
+
+    augment "/yng:cont" {
+        leaf lf2 {
+            type string;
+        }
+    }
+
+}
\ No newline at end of file
--- /dev/null
+// Test model: augments /yng:cont (module "yang", rev 2013-11-26)
+// with string leaf-list lflst1.
+module augment-leaflist {
+    namespace "ns:augment:leaflist";
+    prefix "auglflst";
+
+
+    import yang {prefix yng; revision-date 2013-11-26;}
+
+
+    revision "2013-11-26" {
+    }
+
+    augment "/yng:cont" {
+        leaf-list lflst1 {
+            type string;
+        }
+    }
+
+
+
+}
\ No newline at end of file
--- /dev/null
+// Test model: augments /yng:cont (module "yang", rev 2013-11-26)
+// with list lst1 containing leaf lf11.
+module augment-list {
+    namespace "ns:augment:list";
+    prefix "auglst";
+
+
+    import yang {prefix yng; revision-date 2013-11-26;}
+
+
+    revision "2013-11-26" {
+    }
+
+    augment "/yng:cont" {
+        list lst1 {
+            leaf lf11 {
+                type string;
+            }
+        }
+    }
+
+
+
+}
\ No newline at end of file
--- /dev/null
+<cont>
+ <lf1>lf1</lf1>
+ <lf2>lf2</lf2>
+ <cont1>
+ <lf11>lf11</lf11>
+ </cont1>
+ <lst1>
+ <lf11>lf1_1</lf11>
+ </lst1>
+ <lst1>
+ <lf11>lf1_2</lf11>
+ </lst1>
+ <lflst1>lflst1_1</lflst1>
+ <lflst1>lflst1_2</lflst1>
+ <lflst1>lflst1_3</lflst1>
+</cont>
\ No newline at end of file
--- /dev/null
+// Base test model: container cont with leaf lf1; the augment-* test
+// modules in this directory extend /yng:cont via augment statements.
+module yang {
+    namespace "ns:yang";
+
+    prefix "yng";
+    revision 2013-11-26 {
+    }
+
+    container cont {
+        leaf lf1 {
+            type string;
+        }
+
+    }
+
+
+
+}
\ No newline at end of file
revision 2013-11-12 {
}
+ typedef tpdfempty {
+ type empty;
+ }
+
+ typedef tpdfbit {
+ type bits {
+ bit b1;
+ bit b2;
+ bit b3;
+ }
+ }
+
+ typedef tpdfun4 {
+ type boolean;
+ }
+
+ typedef tpdfun3 {
+ type union {
+ type tpdfbit;
+ type tpdfempty;
+ }
+ }
+
+ typedef tpdfun2 {
+ type union {
+ type tpdfun3;
+ type tpdfun4;
+ }
+ }
+
+ typedef tpdfun1 {
+ type union {
+ type uint8;
+ type decimal64 {
+ fraction-digits 2;
+ }
+ }
+ }
+
container cont {
leaf lfnint8Min {
type int8;
type empty;
}
- leaf-list lflstunion {
+ leaf lfunion1 {
type union {
type uint16;
type string;
}
}
+ leaf lfunion2 {
+ type union {
+ type decimal64 {
+ fraction-digits 2;
+ }
+ type string;
+ }
+ }
+
+ leaf lfunion3 {
+ type union {
+ type empty;
+ type string;
+ }
+ }
+
+ leaf lfunion4 {
+ type union {
+ type boolean;
+ type string;
+ }
+ }
+
+ leaf lfunion5 {
+ type union {
+ type uint16;
+ type string;
+ }
+ }
+
+ leaf lfunion6 {
+ type union {
+ type uint16;
+ type empty;
+ }
+ }
+
+ leaf lfunion7 {
+ type tpdfun3;
+ }
+
+ leaf lfunion8 {
+ type union {
+ type uint16;
+ type string;
+ }
+ }
+
+ leaf lfunion9 {
+ type union {
+ type uint16;
+ type boolean;
+ }
+ }
+
+ leaf lfunion10 {
+ type union {
+ type bits {
+ bit bt1;
+ bit bt2;
+ }
+ type boolean;
+ }
+ }
+
+ leaf lfunion11 {
+ type union {
+ type tpdfun1;
+ type tpdfun2;
+ }
+ }
+
+ leaf lfunion12 {
+ type tpdfun2;
+ }
}
<lfbits>bit3</lfbits>
<lfbinary>AAaacdabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ%%-#^</lfbinary>
<lfempty></lfempty>
- <lflstunion>324</lflstunion>
- <lflstunion>33.3</lflstunion>
- <lflstunion>lfunion</lflstunion>
- <lflstunion>true</lflstunion>
+ <lfunion1>324</lfunion1>
+ <lfunion2>33.3</lfunion2>
+ <lfunion3>55</lfunion3>
+ <lfunion4>true</lfunion4>
+ <lfunion5>true</lfunion5>
+ <lfunion6>false</lfunion6>
+ <lfunion7></lfunion7>
+ <lfunion8></lfunion8>
+ <lfunion9></lfunion9>
+ <lfunion10>bt1</lfunion10>
+ <lfunion11>33</lfunion11>
+ <lfunion12>false</lfunion12>
</cont>
\ No newline at end of file
@Override
public void onSessionInitiated(ProviderContext session) {
+ pSession = session;
DataProviderService dps = session.<DataProviderService>getSALService(DataProviderService.class);
StatisticsManagerActivator.statsProvider.setDataService(dps);
NotificationProviderService nps = session.<NotificationProviderService>getSALService(NotificationProviderService.class);
public void run() {
while(true){
try {
- spLogger.info("Statistics requester thread started with timer interval : {}",5000);
-
statsRequestSender();
Thread.sleep(5000);
}
}
});
+
+ spLogger.debug("Statistics requester thread started with timer interval : {}",5000);
+
+ statisticsRequesterThread.start();
+
spLogger.info("Statistics Provider started.");
}
//Need to call API to receive all the nodes connected to controller.
List<Node> targetNodes = getAllConnectedNodes();
+
+ if(targetNodes == null)
+ return;
for (Node targetNode : targetNodes){
spLogger.info("Send request for stats collection to node : {})",targetNode.getId());
private List<Node> getAllConnectedNodes(){
Nodes nodes = (Nodes) dps.readOperationalData(nodesIdentifier);
+ if(nodes == null)
+ return null;
+
spLogger.info("Number of connected nodes : {}",nodes.getNode().size());
return nodes.getNode();
}
import org.junit.runner.RunWith;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.connect.netconf.InventoryUtils;
-import org.opendaylight.controller.sal.connect.netconf.NetconfDeviceManager;
import org.opendaylight.controller.sal.connect.netconf.NetconfInventoryUtils;
import org.opendaylight.controller.sal.core.api.data.DataBrokerService;
import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
@Inject
DataBrokerService dataBroker;
- @Inject
- NetconfDeviceManager netconfManager;
-
@Test
public void properInitialized() throws Exception {
InstanceIdentifier path = InstanceIdentifier.builder(InventoryUtils.INVENTORY_PATH)
.nodeWithKey(InventoryUtils.INVENTORY_NODE, InventoryUtils.INVENTORY_ID, "foo").toInstance();
- netconfManager.netconfNodeAdded(path, new InetSocketAddress("127.0.0.1", 8383));
-
InstanceIdentifier mountPointPath = path;
public ModuleRpcs getRpcMapping(RuntimeRpcElementResolved id) {
Map<String, ModuleRpcs> modules = mappedRpcs.get(id.getNamespace());
Preconditions.checkState(modules != null, "No modules found for namespace %s", id.getNamespace());
- ModuleRpcs rpcMapping = modules.get(id.getModuleName());
- Preconditions.checkState(modules != null, "No module %s found for namespace %s", id.getModuleName(),
+ String moduleName = id.getModuleName();
+ ModuleRpcs rpcMapping = modules.get(moduleName);
+ Preconditions.checkState(rpcMapping != null, "No module %s found for namespace %s", moduleName,
id.getNamespace());
return rpcMapping;
import com.google.common.collect.Collections2;
import com.google.common.collect.Sets;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.InstanceConfig;
-import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
}));
}
- public Element toXml(ObjectName rootOn, Set<ObjectName> childRbeOns, Document document) {
- return toXml(rootOn, childRbeOns, document, null, null);
+ public Element toXml(ObjectName rootOn, Set<ObjectName> childRbeOns, Document document, Element parentElement, String namespace) {
+ return toXml(rootOn, childRbeOns, document, null, parentElement, namespace);
}
public Element toXml(ObjectName rootOn, Set<ObjectName> childRbeOns, Document document, String instanceIndex,
- String keyName) {
- Element xml = document.createElement(keyName == null ? XmlNetconfConstants.DATA_KEY : keyName);
+ Element parentElement, String namespace) {
// TODO namespace
- xml = instanceMapping.toXml(rootOn, null, "namespace", document, xml);
+ Element xml = instanceMapping.toXml(rootOn, null, namespace, document, parentElement);
if (instanceIndex != null) {
xml.setAttribute(KEY_ATTRIBUTE_KEY, instanceIndex);
String runtimeInstanceIndex = objectName.getKeyProperty(childMappingEntry.getKey());
String elementName = jmxToYangChildRbeMapping.get(childMappingEntry.getKey());
- xml.appendChild(childMappingEntry.getValue().toXml(objectName, innerChildRbeOns, document,
- runtimeInstanceIndex, elementName));
+
+ Element innerXml = document.createElement(elementName);
+ childMappingEntry.getValue().toXml(objectName, innerChildRbeOns, document,
+ runtimeInstanceIndex, innerXml, namespace);
+ xml.appendChild(innerXml);
}
}
package org.opendaylight.controller.netconf.confignetconfconnector.mapping.runtime;
-import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
throw new IllegalStateException("Root runtime bean not found among " + runtimeBeanOns);
}
- public Element toXml(String namespace, Multimap<String, ObjectName> instances, Document document) {
- Element root = document.createElement(XmlNetconfConstants.MODULE_KEY);
- XmlUtil.addNamespaceAttr(root, namespace);
+ public Element toXml(String namespace, String instanceName, Collection<ObjectName> runtimeBeanOns, Document document) {
+ Element moduleElement = document.createElement(XmlNetconfConstants.MODULE_KEY);
- Element nameElement = XmlUtil.createTextElement(document, XmlNetconfConstants.NAME_KEY, moduleName);
- root.appendChild(nameElement);
+ final String prefix = getPrefix(namespace);
+ Element typeElement = XmlUtil.createPrefixedTextElement(document, XmlNetconfConstants.TYPE_KEY, prefix,
+ moduleName);
+ XmlUtil.addPrefixedNamespaceAttr(typeElement, prefix, namespace);
+ moduleElement.appendChild(typeElement);
- for (String instanceName : instances.keySet()) {
- Element instance = document.createElement(XmlNetconfConstants.INSTANCE_KEY);
+ Element nameElement = XmlUtil.createTextElement(document, XmlNetconfConstants.NAME_KEY, instanceName);
+ moduleElement.appendChild(nameElement);
- Element innerNameElement = XmlUtil.createTextElement(document, XmlNetconfConstants.NAME_KEY, instanceName);
- instance.appendChild(innerNameElement);
+ ObjectName rootName = findRoot(runtimeBeanOns);
- Collection<ObjectName> runtimeBeanOns = instances.get(instanceName);
- ObjectName rootName = findRoot(runtimeBeanOns);
+ Set<ObjectName> childrenRuntimeBeans = Sets.newHashSet(runtimeBeanOns);
+ childrenRuntimeBeans.remove(rootName);
- Set<ObjectName> childrenRuntimeBeans = Sets.newHashSet(runtimeBeanOns);
- childrenRuntimeBeans.remove(rootName);
+ instanceRuntime.toXml(rootName, childrenRuntimeBeans, document, moduleElement, namespace);
- instance.appendChild(instanceRuntime.toXml(rootName, childrenRuntimeBeans, document));
-
- root.appendChild(instance);
- }
+ return moduleElement;
+ }
- return root;
+ private String getPrefix(String namespace) {
+ return XmlNetconfConstants.PREFIX;
}
}
if (instanceToRbe == null)
continue;
- ModuleRuntime moduleRuntime = moduleRuntimes.get(localNamespace).get(moduleName);
- Element innerXml = moduleRuntime.toXml(localNamespace, instanceToRbe, document);
- modulesElement.appendChild(innerXml);
+ for (String instanceName : instanceToRbe.keySet()) {
+ ModuleRuntime moduleRuntime = moduleRuntimes.get(localNamespace).get(moduleName);
+ Element innerXml = moduleRuntime.toXml(localNamespace, instanceName, instanceToRbe.get(instanceName), document);
+ modulesElement.appendChild(innerXml);
+ }
+
}
}
private RuntimeRpcElementResolved(String namespace, String moduleName, String instanceName, String runtimeBeanName,
Map<String, String> additionalAttributes) {
- this.moduleName = moduleName;
- this.instanceName = instanceName;
+ this.moduleName = Preconditions.checkNotNull(moduleName, "Module name");
+ this.instanceName = Preconditions.checkNotNull(instanceName, "Instance name");
this.additionalAttributes = additionalAttributes;
- this.namespace = namespace;
- this.runtimeBeanName = runtimeBeanName;
+ this.namespace = Preconditions.checkNotNull(namespace, "Namespace");
+ this.runtimeBeanName = Preconditions.checkNotNull(runtimeBeanName, "Runtime bean name");
}
public String getModuleName() {
return ObjectNameUtil.createRuntimeBeanName(moduleName, instanceName, additionalAttributesJavaNames);
}
- private static final String xpathPatternBlueprint = "/" + XmlNetconfConstants.DATA_KEY + "/"
- + XmlNetconfConstants.MODULES_KEY + "/" + XmlNetconfConstants.MODULE_KEY + "\\["
- + XmlNetconfConstants.NAME_KEY + "='(.+)'\\]/" + XmlNetconfConstants.INSTANCE_KEY + "\\["
- + XmlNetconfConstants.NAME_KEY + "='([^']+)'\\](.*)";
+ private static final String xpathPatternBlueprint =
+ "/" + XmlNetconfConstants.MODULES_KEY
+ + "/" + XmlNetconfConstants.MODULE_KEY
+ + "\\["
+
+ + "(?<key1>type|name)"
+ + "='(?<value1>[^']+)'"
+ + "( and |\\]\\[)"
+ + "(?<key2>type|name)"
+ + "='(?<value2>[^']+)'"
+
+ + "\\]"
+ + "(?<additional>.*)";
+
private static final Pattern xpathPattern = Pattern.compile(xpathPatternBlueprint);
- private static final String additionalPatternBlueprint = "(.+)\\[(.+)='(.+)'\\]";
+ private static final String additionalPatternBlueprint = "(?<additionalKey>.+)\\[(.+)='(?<additionalValue>.+)'\\]";
private static final Pattern additionalPattern = Pattern.compile(additionalPatternBlueprint);
public static RuntimeRpcElementResolved fromXpath(String xpath, String elementName, String namespace) {
"Node %s with value '%s' not in required form on rpc element %s, required format is %s",
RuntimeRpc.CONTEXT_INSTANCE, xpath, elementName, xpathPatternBlueprint);
- String moduleName = matcher.group(1);
- String instanceName = matcher.group(2);
- String additionalString = matcher.group(3);
- HashMap<String, String> additionalAttributes = Maps.<String, String> newHashMap();
- String runtimeBeanYangName = moduleName;
- for (String additionalKeyValue : additionalString.split("/")) {
- if (Strings.isNullOrEmpty(additionalKeyValue))
- continue;
- matcher = additionalPattern.matcher(additionalKeyValue);
- Preconditions
- .checkState(
- matcher.matches(),
- "Attribute %s not in required form on rpc element %s, required format for additional attributes is %s",
- additionalKeyValue, elementName, additionalPatternBlueprint);
- String name = matcher.group(1);
- runtimeBeanYangName = name;
- additionalAttributes.put(name, matcher.group(3));
- }
+ PatternGroupResolver groups = new PatternGroupResolver(matcher.group("key1"), matcher.group("value1"),
+ matcher.group("key2"), matcher.group("value2"), matcher.group("additional"));
+
+ String moduleName = groups.getModuleName();
+ String instanceName = groups.getInstanceName();
+
+ HashMap<String, String> additionalAttributes = groups.getAdditionalKeys(elementName, moduleName);
- return new RuntimeRpcElementResolved(namespace, moduleName, instanceName, runtimeBeanYangName,
+ return new RuntimeRpcElementResolved(namespace, moduleName, instanceName, groups.getRuntimeBeanYangName(),
additionalAttributes);
}
+
+ private static final class PatternGroupResolver {
+
+ private final String key1, key2, value1, value2;
+ private final String additional;
+ private String runtimeBeanYangName;
+
+ PatternGroupResolver(String key1, String value1, String key2, String value2, String additional) {
+ this.key1 = Preconditions.checkNotNull(key1);
+ this.value1 = Preconditions.checkNotNull(value1);
+
+ this.key2 = Preconditions.checkNotNull(key2);
+ this.value2 = Preconditions.checkNotNull(value2);
+
+ this.additional = Preconditions.checkNotNull(additional);
+ }
+
+ String getModuleName() {
+ return key1.equals(XmlNetconfConstants.TYPE_KEY) ? value1 : value2;
+ }
+
+ String getInstanceName() {
+ return key1.equals(XmlNetconfConstants.NAME_KEY) ? value1 : value2;
+ }
+
+ HashMap<String, String> getAdditionalKeys(String elementName, String moduleName) {
+ HashMap<String, String> additionalAttributes = Maps.newHashMap();
+
+ runtimeBeanYangName = moduleName;
+ for (String additionalKeyValue : additional.split("/")) {
+ if (Strings.isNullOrEmpty(additionalKeyValue))
+ continue;
+ Matcher matcher = additionalPattern.matcher(additionalKeyValue);
+ Preconditions
+ .checkState(
+ matcher.matches(),
+ "Attribute %s not in required form on rpc element %s, required format for additional attributes is %s",
+ additionalKeyValue, elementName, additionalPatternBlueprint);
+ String name = matcher.group("additionalKey");
+ runtimeBeanYangName = name;
+ additionalAttributes.put(name, matcher.group("additionalValue"));
+ }
+ return additionalAttributes;
+ }
+
+ private String getRuntimeBeanYangName() {
+ Preconditions.checkState(runtimeBeanYangName!=null);
+ return runtimeBeanYangName;
+ }
+ }
}
Element response = get();
- assertEquals(2, getElementsSize(response, "instance"));
+ System.err.println(XmlUtil.toString(response));
+
+ assertEquals(2, getElementsSize(response, "module"));
assertEquals(2, getElementsSize(response, "asdf"));
assertEquals(5, getElementsSize(response, "inner-running-data"));
assertEquals(5, getElementsSize(response, "deep2"));
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Set;
/**
Thread.sleep(delay);
}
-
- throw new RuntimeException("Netconf server did not provide required capabilities " + expectedCaps
- + " in time, provided capabilities " + currentCapabilities);
+ Set<String> allNotFound = new HashSet<>(expectedCaps);
+ allNotFound.removeAll(currentCapabilities);
+ logger.error("Netconf server did not provide required capabilities. Expected but not found: {}, all expected {}, current {}",
+ allNotFound, expectedCaps ,currentCapabilities);
+ throw new RuntimeException("Netconf server did not provide required capabilities. Expected but not found:" + allNotFound);
}
String messageAsString = XmlUtil.toString(message);
try {
- netconfOperationExecution = getNetconfOperationWithHighestPriority(
- message, session);
+ netconfOperationExecution = getNetconfOperationWithHighestPriority(message, session);
} catch (IllegalArgumentException | IllegalStateException e) {
logger.warn("Unable to handle rpc {} on session {}", messageAsString, session, e);
throw new NetconfDocumentedException(errorMessage, e, NetconfDocumentedException.ErrorType.application,
tag, NetconfDocumentedException.ErrorSeverity.error, errorInfo);
+ } catch (RuntimeException e) {
+ throw handleUnexpectedEx("Unexpected exception during netconf operation sort", e);
}
+ try {
+ return executeOperationWithHighestPriority(message, netconfOperationExecution, messageAsString);
+ } catch (RuntimeException e) {
+ throw handleUnexpectedEx("Unexpected exception during netconf operation execution", e);
+ }
+ }
+
+ private NetconfDocumentedException handleUnexpectedEx(String s, Exception e) throws NetconfDocumentedException {
+ logger.error(s, e);
+
+ Map<String, String> info = Maps.newHashMap();
+ info.put(NetconfDocumentedException.ErrorSeverity.error.toString(), e.toString());
+ return new NetconfDocumentedException("Unexpected error",
+ NetconfDocumentedException.ErrorType.application,
+ NetconfDocumentedException.ErrorTag.operation_failed,
+ NetconfDocumentedException.ErrorSeverity.error, info);
+ }
+
+ private Document executeOperationWithHighestPriority(Document message, NetconfOperationExecution netconfOperationExecution, String messageAsString) throws NetconfDocumentedException {
logger.debug("Forwarding netconf message {} to {}", messageAsString,
netconfOperationExecution.operationWithHighestPriority);
+ "<no-arg xmlns=\""
+ expectedNamespace
+ "\"> "
- + "<context-instance>/data/modules/module[name='impl-netconf']/instance[name='instance']</context-instance>"
+ + "<context-instance>/modules/module[type='impl-netconf'][name='instance']</context-instance>"
+ "<arg1>argument1</arg1>" + "</no-arg>" + "</rpc>";
final Document doc = XmlUtil.readXmlToDocument(rpc);
final NetconfMessage message = netconfClient.sendMessage(new NetconfMessage(doc));
<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<no-arg xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
- <context-instance>/data/modules/module[name='impl-netconf']/instance[name='instance']</context-instance>
+ <context-instance>/modules/module[type='impl-netconf' and name='instance']</context-instance>
<arg1>
testarg1
</arg1>
<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<noArgInner xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
<context-instance>
- /data/modules/module[name='impl-netconf']/instance[name='instance2']/inner-running-data-additional[key='randomString_1003']
+ /modules/module[name='instance2'][type='impl-netconf']/inner-running-data-additional[key='randomString_1003']
</context-instance>
</noArgInner>
</rpc>
<noArgInnerInner
xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
<context-instance>
- /data/modules/module[name='impl-netconf']/instance[name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
+ /modules/module[type='impl-netconf'][name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
</context-instance>
<arg1>
<leaf-list-output
xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
<context-instance>
- /data/modules/module[name='impl-netconf']/instance[name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
+ /modules/module[type='impl-netconf'][name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
</context-instance>
</leaf-list-output>
</rpc>
import org.opendaylight.controller.containermanager.ContainerConfig;
-@XmlRootElement(name = "container-config-list")
+@XmlRootElement(name = "containerConfig-list")
@XmlAccessorType(XmlAccessType.NONE)
public class ContainerConfigs {
- @XmlElement(name = "container-config")
+ @XmlElement(name = "containerConfig")
List<ContainerConfig> containerConfig;
//To satisfy JAXB
* http://localhost:8080/controller/nb/v2/containermanager/containers
*
* Response body in XML:
- * <container-config-list>
- *    <container-config>
+ * <containerConfig-list>
+ *    <containerConfig>
*       <container>black</container>
*       <staticVlan>10</staticVlan>
*       <nodeConnectors>OF|1@OF|00:00:00:00:00:00:00:01</nodeConnectors>
*        <name>tcp</name>
*        <protocol>TCP</protocol>
*       </flowSpecs>
- *     </container-config>
- *     <container-config>
+ *     </containerConfig>
+ *     <containerConfig>
*       <container>red</container>
*       <staticVlan>20</staticVlan>
*       <nodeConnectors>OF|1@OF|00:00:00:00:00:00:00:01</nodeConnectors>
*        <name>udp</name>
*        <protocol>UDP</protocol>
*       </flowSpecs>
- *     </container-config>
- * </container-config-list>
+ *     </containerConfig>
+ * </containerConfig-list>
*
* Response body in JSON:
- * { "container-config" : [
+ * { "containerConfig" : [
* { "container" : "black",
* "nodeConnectors" : [
* "OF|1@OF|00:00:00:00:00:00:00:01", "OF|23@OF|00:00:00:00:00:00:20:21"
* http://localhost:8080/controller/nb/v2/containermanager/container/blue
*
* Response body in XML:
- * <container-config>
+ * <containerConfig>
*     <container>blue</container>
*     <staticVlan>10</staticVlan>
*     <nodeConnectors>OF|1@OF|00:00:00:00:00:00:00:01</nodeConnectors>
*     <nodeConnectors>OF|23@OF|00:00:00:00:00:00:20:21</nodeConnectors>
- * </container-config>
+ * </containerConfig>
*
* Response body in JSON:
* {
- * "container-config": [
+ * "containerConfig": [
* {
* "container": "yellow",
* "staticVlan": "10",
* http://localhost:8080/controller/nb/v2/containermanager/container/yellow
*
* Request body in XML:
- * <container-config>
+ * <containerConfig>
*     <container>yellow</container>
*     <staticVlan>10</staticVlan>
*     <nodeConnectors></nodeConnectors>
- * </container-config>
+ * </containerConfig>
*
* Request body in JSON:
* {
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>arphandler</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${arphandler.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.codehaus.enunciate</groupId>
</scm>
<artifactId>samples.loadbalancer</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
import org.apache.felix.dm.Component;
import org.opendaylight.controller.forwardingrulesmanager.FlowEntry;
import org.opendaylight.controller.forwardingrulesmanager.IForwardingRulesManager;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
import org.opendaylight.controller.sal.action.Action;
import org.slf4j.LoggerFactory;
/**
- * This class is the main class that represents the load balancer service.
- * This is a sample load balancer application that balances traffic to backend servers
- * based on the source address and source port on each incoming packet. The service
- * reactively installs OpenFlow rules to direct all packets with a specific source address
- * and source port to one of the appropriate backend servers. The servers may be chosen
- * using a round robin policy or a random policy. This service can be configured via a
- * REST APIs which are similar to the OpenStack Quantum LBaaS (Load-balancer-as-a-Service)
- * v1.0 API proposal (http://wiki.openstack.org/Quantum/LBaaS)
+ * This class is the main class that represents the load balancer service. This
+ * is a sample load balancer application that balances traffic to backend
+ * servers based on the source address and source port on each incoming packet.
+ * The service reactively installs OpenFlow rules to direct all packets with a
+ * specific source address and source port to one of the appropriate backend
+ * servers. The servers may be chosen using a round robin policy or a random
+ * policy. This service can be configured via REST APIs which are similar to
+ * the OpenStack Quantum LBaaS (Load-balancer-as-a-Service) v1.0 API proposal
+ * (http://wiki.openstack.org/Quantum/LBaaS)
*
- * To use this service, a virtual IP (or VIP) should be exposed to the clients of this service
- * and used as the destination address. A VIP is a entity that comprises of a virtual IP, port
- * and protocol (TCP or UDP).
- * Assumptions:
- * 1. One or more VIPs may be mapped to the same server pool. All VIPs that share the same
- * pool must also share the same load balancing policy (random or round robin).
+ * To use this service, a virtual IP (or VIP) should be exposed to the clients
+ * of this service and used as the destination address. A VIP is an entity that
+ * comprises a virtual IP, port and protocol (TCP or UDP). Assumptions: 1.
+ * One or more VIPs may be mapped to the same server pool. All VIPs that share
+ * the same pool must also share the same load balancing policy (random or round
+ * robin).
*
- * 2. Only one server pool can be be assigned to a VIP.
+ * 2. Only one server pool can be assigned to a VIP.
*
- * 3. All flow rules are installed with an idle timeout of 5 seconds.
+ * 3. All flow rules are installed with an idle timeout of 5 seconds.
*
- * 4. Packets to a VIP must leave the OpenFlow cluster from the same switch from where
- * it entered it.
+ * 4. Packets to a VIP must leave the OpenFlow cluster from the same switch from
+ * where it entered it.
*
- * 5. When you delete a VIP or a server pool or a server from a pool, the service does not
- * delete the flow rules it has already installed. The flow rules should automatically
- * time out after the idle timeout of 5 seconds.
+ * 5. When you delete a VIP or a server pool or a server from a pool, the
+ * service does not delete the flow rules it has already installed. The flow
+ * rules should automatically time out after the idle timeout of 5 seconds.
*
*/
-public class LoadBalancerService implements IListenDataPacket, IConfigManager{
+public class LoadBalancerService implements IListenDataPacket, IConfigManager {
/*
* Logger instance
private static Logger lbsLogger = LoggerFactory.getLogger(LoadBalancerService.class);
/*
- * Single instance of the configuration manager. Application passes this reference to all
- * the new policies implemented for load balancing.
+ * Single instance of the configuration manager. Application passes this
+ * reference to all the new policies implemented for load balancing.
*/
private static ConfigManager configManager = new ConfigManager();
* Round robing policy instance. Need to implement factory patterns to get
* policy instance.
*/
- private static RoundRobinLBPolicy rrLBMethod= new RoundRobinLBPolicy(configManager);
+ private static RoundRobinLBPolicy rrLBMethod = new RoundRobinLBPolicy(configManager);
/*
* Random policy instance.
*/
- private static RandomLBPolicy ranLBMethod= new RandomLBPolicy(configManager);
+ private static RandomLBPolicy ranLBMethod = new RandomLBPolicy(configManager);
/*
* Reference to the data packet service
private String containerName = null;
/*
- * Set/unset methods for the service instance that load balancer
- * service requires
+ * Set/unset methods for the service instance that load balancer service
+ * requires
*/
public String getContainerName() {
if (containerName == null)
}
}
- public void setForwardingRulesManager(
- IForwardingRulesManager forwardingRulesManager) {
+ public void setForwardingRulesManager(IForwardingRulesManager forwardingRulesManager) {
lbsLogger.debug("Setting ForwardingRulesManager");
this.ruleManager = forwardingRulesManager;
}
- public void unsetForwardingRulesManager(
- IForwardingRulesManager forwardingRulesManager) {
+ public void unsetForwardingRulesManager(IForwardingRulesManager forwardingRulesManager) {
if (this.ruleManager == forwardingRulesManager) {
this.ruleManager = null;
}
}
/**
- * This method receives first packet of flows for which there is no
- * matching flow rule installed on the switch. IP addresses used for VIPs
- * are not supposed to be used by any real/virtual host in the network.
- * Hence, any forwarding/routing service will not install any flows rules matching
- * these VIPs. This ensures that all the flows destined for VIPs will not find a match
- * in the switch and will be forwarded to the load balancing service.
- * Service will decide where to route this traffic based on the load balancing
- * policy of the VIP's attached pool and will install appropriate flow rules
- * in a reactive manner.
+ * This method receives first packet of flows for which there is no matching
+ * flow rule installed on the switch. IP addresses used for VIPs are not
+ * supposed to be used by any real/virtual host in the network. Hence, any
+ * forwarding/routing service will not install any flows rules matching
+ * these VIPs. This ensures that all the flows destined for VIPs will not
+ * find a match in the switch and will be forwarded to the load balancing
+ * service. Service will decide where to route this traffic based on the
+ * load balancing policy of the VIP's attached pool and will install
+ * appropriate flow rules in a reactive manner.
*/
@Override
- public PacketResult receiveDataPacket(RawPacket inPkt){
+ public PacketResult receiveDataPacket(RawPacket inPkt) {
if (inPkt == null) {
return PacketResult.IGNORED;
if (ipPkt instanceof IPv4) {
- lbsLogger.debug("Packet recieved from switch : {}",inPkt.getIncomingNodeConnector().getNode().toString());
- IPv4 ipv4Pkt = (IPv4)ipPkt;
- if(IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.TCP.toString())
- || IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.UDP.toString())){
+                lbsLogger.debug("Packet received from switch : {}", inPkt.getIncomingNodeConnector().getNode()
+ .toString());
+ IPv4 ipv4Pkt = (IPv4) ipPkt;
+ if (IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.TCP.toString())
+ || IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.UDP.toString())) {
- lbsLogger.debug("Packet protocol : {}",IPProtocols.getProtocolName(ipv4Pkt.getProtocol()));
+ lbsLogger.debug("Packet protocol : {}", IPProtocols.getProtocolName(ipv4Pkt.getProtocol()));
Client client = new LBUtil().getClientFromPacket(ipv4Pkt);
VIP vip = new LBUtil().getVIPFromPacket(ipv4Pkt);
- if(configManager.vipExists(vip)){
+ if (configManager.vipExists(vip)) {
VIP vipWithPoolName = configManager.getVIPWithPoolName(vip);
String poolMemberIp = null;
- if(vipWithPoolName.getPoolName() == null){
- lbsLogger.error("No pool attached. Please attach pool with the VIP -- {}",vip);
+ if (vipWithPoolName.getPoolName() == null) {
+ lbsLogger.error("No pool attached. Please attach pool with the VIP -- {}", vip);
return PacketResult.IGNORED;
}
- if(configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod().equalsIgnoreCase(LBConst.ROUND_ROBIN_LB_METHOD)){
+ if (configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod()
+ .equalsIgnoreCase(LBConst.ROUND_ROBIN_LB_METHOD)) {
- poolMemberIp = rrLBMethod.getPoolMemberForClient(client,vipWithPoolName);
+ poolMemberIp = rrLBMethod.getPoolMemberForClient(client, vipWithPoolName);
}
- if(configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod().equalsIgnoreCase(LBConst.RANDOM_LB_METHOD)){
- poolMemberIp = ranLBMethod.getPoolMemberForClient(client,vipWithPoolName);
+ if (configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod()
+ .equalsIgnoreCase(LBConst.RANDOM_LB_METHOD)) {
+ poolMemberIp = ranLBMethod.getPoolMemberForClient(client, vipWithPoolName);
}
try {
Node clientNode = inPkt.getIncomingNodeConnector().getNode();
- HostNodeConnector hnConnector = this.hostTracker.hostFind(InetAddress.getByName(poolMemberIp));
+ // HostTracker hosts db key scheme implementation
+ IHostId id = HostIdFactory.create(InetAddress.getByName(poolMemberIp), null);
+ HostNodeConnector hnConnector = this.hostTracker.hostFind(id);
Node destNode = hnConnector.getnodeconnectorNode();
- lbsLogger.debug("Client is connected to switch : {}",clientNode.toString());
- lbsLogger.debug("Destination pool machine is connected to switch : {}",destNode.toString());
+ lbsLogger.debug("Client is connected to switch : {}", clientNode.toString());
+ lbsLogger
+ .debug("Destination pool machine is connected to switch : {}", destNode.toString());
- //Get path between both the nodes
+ // Get path between both the nodes
NodeConnector forwardPort = null;
- if(clientNode.getNodeIDString().equals(destNode.getNodeIDString())){
+ if (clientNode.getNodeIDString().equals(destNode.getNodeIDString())) {
forwardPort = hnConnector.getnodeConnector();
- lbsLogger.info("Both source (client) and destination pool machine is connected to same switch nodes. Respective ports are - {},{}",forwardPort,inPkt.getIncomingNodeConnector());
+ lbsLogger
+ .info("Both source (client) and destination pool machine is connected to same switch nodes. Respective ports are - {},{}",
+ forwardPort, inPkt.getIncomingNodeConnector());
- }else{
+ } else {
Path route = this.routing.getRoute(clientNode, destNode);
- lbsLogger.info("Path between source (client) and destination switch nodes : {}",route.toString());
+ lbsLogger.info("Path between source (client) and destination switch nodes : {}",
+ route.toString());
forwardPort = route.getEdges().get(0).getTailNodeConnector();
}
- if(installLoadBalancerFlow(client,
- vip,
- clientNode,
- poolMemberIp,
- hnConnector.getDataLayerAddressBytes(),
- forwardPort,
- LBConst.FORWARD_DIRECTION_LB_FLOW)){
- lbsLogger.info("Traffic from client : {} will be routed " +
- "to pool machine : {}",client,poolMemberIp);
- }else{
- lbsLogger.error("Not able to route traffic from client : {}",client );
+ if (installLoadBalancerFlow(client, vip, clientNode, poolMemberIp,
+ hnConnector.getDataLayerAddressBytes(), forwardPort,
+ LBConst.FORWARD_DIRECTION_LB_FLOW)) {
+ lbsLogger.info("Traffic from client : {} will be routed " + "to pool machine : {}",
+ client, poolMemberIp);
+ } else {
+ lbsLogger.error("Not able to route traffic from client : {}", client);
}
- if(installLoadBalancerFlow(client,
- vip,
- clientNode,
- poolMemberIp,
- vipMacAddr,
- inPkt.getIncomingNodeConnector(),
- LBConst.REVERSE_DIRECTION_LB_FLOW)){
- lbsLogger.info("Flow rule installed to change the source ip/mac from " +
- "pool machine ip {} to VIP {} for traffic coming pool machine",poolMemberIp,vip);
- }else{
- lbsLogger.error("Not able to route traffic from client : {}",client );
+ if (installLoadBalancerFlow(client, vip, clientNode, poolMemberIp, vipMacAddr,
+ inPkt.getIncomingNodeConnector(), LBConst.REVERSE_DIRECTION_LB_FLOW)) {
+ lbsLogger.info("Flow rule installed to change the source ip/mac from "
+                                        + "pool machine ip {} to VIP {} for traffic coming from pool machine", poolMemberIp,
+ vip);
+ } else {
+ lbsLogger.error("Not able to route traffic from client : {}", client);
}
- }catch (UnknownHostException e) {
- lbsLogger.error("Pool member not found in the network : {}",e.getMessage());
- lbsLogger.error("",e);
+ } catch (UnknownHostException e) {
+ lbsLogger.error("Pool member not found in the network : {}", e.getMessage());
+ lbsLogger.error("", e);
}
}
}
}
/*
- * This method installs the flow rule for routing the traffic between two hosts.
- * @param source Traffic is sent by this source
- * @param dest Traffic is destined to this destination (VIP)
- * @param sourceSwitch Switch from where controller received the packet
- * @param destMachineIp IP address of the pool member where traffic needs to be routed
- * @param destMachineMac MAC address of the pool member where traffic needs to be routed
- * @param outport Use this port to send out traffic
- * @param flowDirection FORWARD_DIRECTION_LB_FLOW or REVERSE_DIRECTION_LB_FLOW
- * @return true If flow installation was successful
- * false else
- * @throws UnknownHostException
+ * This method installs the flow rule for routing the traffic between two
+ * hosts.
+ *
+ * @param source Traffic is sent by this source
+ *
+ * @param dest Traffic is destined to this destination (VIP)
+ *
+ * @param sourceSwitch Switch from where controller received the packet
+ *
+ * @param destMachineIp IP address of the pool member where traffic needs to
+ * be routed
+ *
+ * @param destMachineMac MAC address of the pool member where traffic needs
+ * to be routed
+ *
+ * @param outport Use this port to send out traffic
+ *
+ * @param flowDirection FORWARD_DIRECTION_LB_FLOW or
+ * REVERSE_DIRECTION_LB_FLOW
+ *
+ * @return true if flow installation was successful, false otherwise
+ *
+ * @throws UnknownHostException
*/
- private boolean installLoadBalancerFlow(Client source,
- VIP dest,
- Node sourceSwitch,
- String destMachineIp,
- byte[] destMachineMac,
- NodeConnector outport,
- int flowDirection) throws UnknownHostException{
+ private boolean installLoadBalancerFlow(Client source, VIP dest, Node sourceSwitch, String destMachineIp,
+ byte[] destMachineMac, NodeConnector outport, int flowDirection) throws UnknownHostException {
Match match = new Match();
List<Action> actions = new ArrayList<Action>();
- if(flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW){
+ if (flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW) {
match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
match.setField(MatchType.NW_SRC, InetAddress.getByName(source.getIp()));
match.setField(MatchType.NW_DST, InetAddress.getByName(dest.getIp()));
actions.add(new SetDlDst(destMachineMac));
}
- if(flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW){
+ if (flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW) {
match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
match.setField(MatchType.NW_SRC, InetAddress.getByName(destMachineIp));
match.setField(MatchType.NW_DST, InetAddress.getByName(source.getIp()));
match.setField(MatchType.NW_PROTO, IPProtocols.getProtocolNumberByte(source.getProtocol()));
match.setField(MatchType.TP_SRC, dest.getPort());
- match.setField(MatchType.TP_DST,source.getPort());
+ match.setField(MatchType.TP_DST, source.getPort());
actions.add(new SetNwSrc(InetAddress.getByName(dest.getIp())));
actions.add(new SetDlSrc(destMachineMac));
flow.setHardTimeout((short) 0);
flow.setPriority(LB_IPSWITCH_PRIORITY);
- String policyName = source.getIp()+":"+source.getProtocol()+":"+source.getPort();
- String flowName =null;
+ String policyName = source.getIp() + ":" + source.getProtocol() + ":" + source.getPort();
+ String flowName = null;
- if(flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW){
- flowName = "["+policyName+":"+source.getIp() + ":"+dest.getIp()+"]";
+ if (flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW) {
+ flowName = "[" + policyName + ":" + source.getIp() + ":" + dest.getIp() + "]";
}
- if(flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW){
+ if (flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW) {
- flowName = "["+policyName+":"+dest.getIp() + ":"+source.getIp()+"]";
+ flowName = "[" + policyName + ":" + dest.getIp() + ":" + source.getIp() + "]";
}
FlowEntry fEntry = new FlowEntry(policyName, flowName, flow, sourceSwitch);
- lbsLogger.info("Install flow entry {} on node {}",fEntry.toString(),sourceSwitch.toString());
+ lbsLogger.info("Install flow entry {} on node {}", fEntry.toString(), sourceSwitch.toString());
- if(!this.ruleManager.checkFlowEntryConflict(fEntry)){
- if(this.ruleManager.installFlowEntry(fEntry).isSuccess()){
+ if (!this.ruleManager.checkFlowEntryConflict(fEntry)) {
+ if (this.ruleManager.installFlowEntry(fEntry).isSuccess()) {
return true;
- }else{
- lbsLogger.error("Error in installing flow entry to node : {}",sourceSwitch);
+ } else {
+ lbsLogger.error("Error in installing flow entry to node : {}", sourceSwitch);
}
- }else{
- lbsLogger.error("Conflicting flow entry exists : {}",fEntry.toString());
+ } else {
+ lbsLogger.error("Conflicting flow entry exists : {}", fEntry.toString());
}
return false;
}
this.containerName = (String) props.get("containerName");
lbsLogger.info("Running container name:" + this.containerName);
- }else {
+ } else {
// In the Global instance case the containerName is empty
this.containerName = "";
}
lbsLogger.info(configManager.toString());
+
}
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
*
*/
void destroy() {
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
*
*/
void start() {
}
/**
- * Function called by the dependency manager before the services
- * exported by the component are unregistered, this will be
- * followed by a "destroy ()" calls
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
*
*/
void stop() {
}
/*
- * All the methods below are just proxy methods to direct the REST API requests to configuration
- * manager. We need this redirection as currently, opendaylight supports only one
- * implementation of the service.
+ * All the methods below are just proxy methods to direct the REST API
+ * requests to configuration manager. We need this redirection as currently,
+ * opendaylight supports only one implementation of the service.
*/
@Override
public Set<VIP> getAllVIPs() {
}
@Override
- public boolean vipExists(String name, String ip, String protocol,
- short protocolPort, String poolName) {
+ public boolean vipExists(String name, String ip, String protocol, short protocolPort, String poolName) {
return configManager.vipExists(name, ip, protocol, protocolPort, poolName);
}
}
@Override
- public VIP createVIP(String name, String ip, String protocol,
- short protocolPort, String poolName) {
+ public VIP createVIP(String name, String ip, String protocol, short protocolPort, String poolName) {
return configManager.createVIP(name, ip, protocol, protocolPort, poolName);
}
}
@Override
- public PoolMember addPoolMember(String name,
- String memberIP,
- String poolName) {
+ public PoolMember addPoolMember(String name, String memberIP, String poolName) {
return configManager.addPoolMember(name, memberIP, poolName);
}
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>samples.loadbalancer</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${samples.loadbalancer}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>