<tag>HEAD</tag>
</scm>
<artifactId>arphandler</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<configuration>
<instructions>
<Import-Package>
+ org.opendaylight.controller.sal.packet.address,
org.opendaylight.controller.connectionmanager,
org.opendaylight.controller.sal.connection,
org.opendaylight.controller.sal.core,
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.clustering.services.IClusterServices;
import org.opendaylight.controller.connectionmanager.IConnectionManager;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.IfHostListener;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
private BlockingQueue<ARPCacheEvent> ARPCacheEvents = new LinkedBlockingQueue<ARPCacheEvent>();
private Thread cacheEventHandler;
private boolean stopping = false;
+
/*
* A cluster allocated cache. Used for synchronizing ARP request/reply
- * events across all cluster controllers. To raise an event, we put() a specific
- * event object (as key) and all nodes handle it in the entryUpdated callback.
+ * events across all cluster controllers. To raise an event, we put() a
+ * specific event object (as key) and all nodes handle it in the
+ * entryUpdated callback.
*
* In case of ARPReply, we put true value to send replies to any requestors
* by calling generateAndSendReply
*/
private ConcurrentMap<ARPEvent, Boolean> arpRequestReplyEvent;
- void setConnectionManager(IConnectionManager cm){
+ void setConnectionManager(IConnectionManager cm) {
this.connectionManager = cm;
}
- void unsetConnectionManager(IConnectionManager cm){
- if (this.connectionManager == cm){
+ void unsetConnectionManager(IConnectionManager cm) {
+ if (this.connectionManager == cm) {
connectionManager = null;
}
}
- void setClusterContainerService(IClusterContainerServices s){
+ void setClusterContainerService(IClusterContainerServices s) {
this.clusterContainerService = s;
}
}
}
- protected void sendARPReply(NodeConnector p, byte[] sMAC, InetAddress sIP,
- byte[] tMAC, InetAddress tIP) {
+ protected void sendARPReply(NodeConnector p, byte[] sMAC, InetAddress sIP, byte[] tMAC, InetAddress tIP) {
byte[] senderIP = sIP.getAddress();
byte[] targetIP = tIP.getAddress();
- ARP arp = createARP(ARP.REPLY,sMAC,senderIP,tMAC,targetIP);
+ ARP arp = createARP(ARP.REPLY, sMAC, senderIP, tMAC, targetIP);
Ethernet ethernet = createEthernet(sMAC, tMAC, arp);
// Make sure that the host is a legitimate member of this subnet
if (!subnet.hasNodeConnector(p)) {
- log.debug("{} showing up on {} does not belong to {}",
- new Object[] { sourceIP, p, subnet });
+ log.debug("{} showing up on {} does not belong to {}", new Object[] { sourceIP, p, subnet });
return;
}
}
/*
- * OpCode != request -> ARP Reply. If there are hosts (in
- * arpRequestors) waiting for the ARP reply for this sourceIP, it's
- * time to generate the reply and send it to these hosts.
+ * OpCode != request -> ARP Reply. If there are hosts (in arpRequestors)
+ * waiting for the ARP reply for this sourceIP, it's time to generate
+ * the reply and send it to these hosts.
*
* If sourceIP==targetIP, it is a Gratuitous ARP. If there are hosts (in
* arpRequestors) waiting for the ARP reply for this sourceIP, it's time
*/
if (pkt.getOpCode() != ARP.REQUEST || sourceIP.equals(targetIP)) {
- // Raise a reply event so that any waiting requestors will be sent a reply
- // the true value indicates we should generate replies to requestors across the cluster
+ // Raise a reply event so that any waiting requestors will be sent a
+ // reply
+ // the true value indicates we should generate replies to requestors
+ // across the cluster
log.trace("Received ARP reply packet from {}, reply to all requestors.", sourceIP);
arpRequestReplyEvent.put(new ARPReply(sourceIP, sourceMAC), true);
return;
}
/*
- * ARP Request Handling:
- * If targetIP is the IP of the subnet, reply with ARP REPLY
- * If targetIP is a known host, PROXY ARP (by sending ARP REPLY) on behalf of known target hosts.
- * For unknown target hosts, generate and send an ARP request to ALL switches/ports using
- * the IP address defined in the subnet as source address
+ * ARP Request Handling: If targetIP is the IP of the subnet, reply with
+ * ARP REPLY If targetIP is a known host, PROXY ARP (by sending ARP
+ * REPLY) on behalf of known target hosts. For unknown target hosts,
+ * generate and send an ARP request to ALL switches/ports using the IP
+ * address defined in the subnet as source address
*/
/*
* If target IP is gateway IP, Send ARP reply
*/
if ((targetIP.equals(subnet.getNetworkAddress()))
&& (NetUtils.isBroadcastMACAddr(targetMAC) || Arrays.equals(targetMAC, getControllerMAC()))) {
- if (connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL){
- if (log.isTraceEnabled()){
+ if (connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL) {
+ if (log.isTraceEnabled()) {
log.trace("Received local ARP req. for default gateway. Replying with controller MAC: {}",
HexEncode.bytesToHexString(getControllerMAC()));
}
return;
}
-
- HostNodeConnector host = hostTracker.hostQuery(targetIP);
+ // Hosttracker hosts db key implementation
+ IHostId id = HostIdFactory.create(targetIP, null);
+ HostNodeConnector host = hostTracker.hostQuery(id);
// unknown host, initiate ARP request
if (host == null) {
// add the requestor to the list so that we can replay the reply
arpRequestors.put(targetIP, requestorSet);
}
requestorSet.add(requestor);
- countDownTimers.put(targetIP, (short) 2); // reset timeout to 2sec
+ countDownTimers.put(targetIP, (short) 2); // reset timeout to
+ // 2sec
}
- //Raise a bcast request event, all controllers need to send one
+ // Raise a bcast request event, all controllers need to send one
log.trace("Sending a bcast ARP request for {}", targetIP);
arpRequestReplyEvent.put(new ARPRequest(targetIP, subnet), false);
} else {
/*
- * Target host known (across the cluster), send ARP REPLY make sure that targetMAC
- * matches the host's MAC if it is not broadcastMAC
+ * Target host known (across the cluster), send ARP REPLY make sure
+ * that targetMAC matches the host's MAC if it is not broadcastMAC
*/
if (NetUtils.isBroadcastMACAddr(targetMAC) || Arrays.equals(host.getDataLayerAddressBytes(), targetMAC)) {
log.trace("Received ARP req. for known host {}, sending reply...", targetIP);
if (connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL) {
- sendARPReply(p,
- host.getDataLayerAddressBytes(),
- host.getNetworkAddress(),
- pkt.getSenderHardwareAddress(),
- sourceIP);
+ sendARPReply(p, host.getDataLayerAddressBytes(), host.getNetworkAddress(),
+ pkt.getSenderHardwareAddress(), sourceIP);
} else {
- arpRequestReplyEvent.put(new ARPReply(
- p,
- host.getNetworkAddress(),
- host.getDataLayerAddressBytes(),
- sourceIP,
- pkt.getSenderHardwareAddress()), false);
+ arpRequestReplyEvent.put(new ARPReply(p, host.getNetworkAddress(), host.getDataLayerAddressBytes(),
+ sourceIP, pkt.getSenderHardwareAddress()), false);
}
} else {
/*
- * Target MAC has been changed. For now, discard it.
- * TODO: We may need to send unicast ARP REQUEST on behalf of
- * the target back to the sender to trigger the sender to update
- * its table
+ * Target MAC has been changed. For now, discard it. TODO: We
+ * may need to send unicast ARP REQUEST on behalf of the target
+ * back to the sender to trigger the sender to update its table
*/
}
}
}
/**
- * Send a broadcast ARP Request to the switch/ ports using
- * the networkAddress of the subnet as sender IP
- * the controller's MAC as sender MAC
- * the targetIP as the target Network Address
+ * Send a broadcast ARP Request to the switch/ ports using the
+ * networkAddress of the subnet as sender IP the controller's MAC as sender
+ * MAC the targetIP as the target Network Address
*/
protected void sendBcastARPRequest(InetAddress targetIP, Subnet subnet) {
log.trace("sendBcatARPRequest targetIP:{} subnet:{}", targetIP, subnet);
}
byte[] targetHardwareAddress = new byte[] { (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0 };
- //TODO: should use IBroadcastHandler instead
+ // TODO: should use IBroadcastHandler instead
for (NodeConnector p : nodeConnectors) {
- //filter out any non-local or internal ports
- if (!(connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL) || topologyManager.isInternal(p)) {
+ // filter out any non-local or internal ports
+ if (!(connectionManager.getLocalityStatus(p.getNode()) == ConnectionLocality.LOCAL)
+ || topologyManager.isInternal(p)) {
continue;
}
log.trace("Sending toward nodeConnector:{}", p);
/**
* Send a unicast ARP Request to the known host on a specific switch/port as
- * defined in the host.
- * The sender IP is the networkAddress of the subnet
+ * defined in the host. The sender IP is the networkAddress of the subnet
* The sender MAC is the controller's MAC
*/
protected void sendUcastARPRequest(HostNodeConnector host, Subnet subnet) {
Subnet subnet = null;
if (switchManager != null) {
- subnet = switchManager.getSubnetByNetworkAddress(host
- .getNetworkAddress());
+ subnet = switchManager.getSubnetByNetworkAddress(host.getNetworkAddress());
}
if (subnet == null) {
log.debug("can't find subnet matching {}", host.getNetworkAddress());
return;
}
- if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL){
+ if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL) {
log.trace("Send a ucast ARP req. to: {}", host);
sendUcastARPRequest(host, subnet);
} else {
}
/**
- * An IP packet is punted to the controller, this means that the
- * destination host is not known to the controller.
- * Need to discover it by sending a Broadcast ARP Request
+ * An IP packet is punted to the controller, this means that the destination
+ * host is not known to the controller. Need to discover it by sending a
+ * Broadcast ARP Request
*
* @param pkt
* @param p
InetAddress dIP = NetUtils.getInetAddress(pkt.getDestinationAddress());
if (dIP == null) {
- return;
+ return;
}
// try to find a matching subnet
}
// see if we know about the host
- HostNodeConnector host = hostTracker.hostFind(dIP);
+ // Hosttracker hosts db key implementation
+ IHostId id = HostIdFactory.create(dIP, null);
+ HostNodeConnector host = hostTracker.hostFind(id);
if (host == null) {
// if we don't, know about the host, try to find it
- log.trace("Punted IP pkt to {}, sending bcast ARP event...",
- dIP);
+ log.trace("Punted IP pkt to {}, sending bcast ARP event...", dIP);
/*
* unknown destination host, initiate bcast ARP request
*/
arpRequestReplyEvent.put(new ARPRequest(dIP, subnet), false);
- } else if (routing == null ||
- routing.getRoute(p.getNode(), host.getnodeconnectorNode()) != null) {
- /* if IRouting is available, make sure that this packet can get it's
+ } else if (routing == null || routing.getRoute(p.getNode(), host.getnodeconnectorNode()) != null) {
+ /*
+ * if IRouting is available, make sure that this packet can get it's
* destination normally before teleporting it there. If it's not
* available, then assume it's reachable.
*
log.trace("forwarding punted IP pkt to {} received at {}", dIP, p);
- /* if we know where the host is and there's a path from where this
+ /*
+ * if we know where the host is and there's a path from where this
* packet was punted to where the host is, then deliver it to the
- * host for now */
+ * host for now
+ */
NodeConnector nc = host.getnodeConnector();
// re-encode the Ethernet packet (the parent of the IPv4 packet)
rp.setOutgoingNodeConnector(nc);
this.dataPacketService.transmitDataPacket(rp);
} else {
- log.trace("ignoring punted IP pkt to {} because there is no route from {}",
- dIP, p);
+ log.trace("ignoring punted IP pkt to {} because there is no route from {}", dIP, p);
}
}
allocateCaches();
retrieveCaches();
+
}
@SuppressWarnings({ "unchecked" })
private void retrieveCaches() {
- ConcurrentMap<?,?> map;
+ ConcurrentMap<?, ?> map;
- if (this.clusterContainerService == null){
+ if (this.clusterContainerService == null) {
log.error("Cluster service unavailable, can't retieve ARPHandler caches!");
return;
}
map = clusterContainerService.getCache(ARP_EVENT_CACHE_NAME);
- if (map != null){
+ if (map != null) {
this.arpRequestReplyEvent = (ConcurrentMap<ARPEvent, Boolean>) map;
} else {
log.error("Cache allocation failed for {}", ARP_EVENT_CACHE_NAME);
}
private void allocateCaches() {
- if (clusterContainerService == null){
+ if (clusterContainerService == null) {
nonClusterObjectCreate();
log.error("Clustering service unavailable. Allocated non-cluster caches for ARPHandler.");
return;
}
- try{
+ try {
clusterContainerService.createCache(ARP_EVENT_CACHE_NAME,
EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
- } catch (CacheConfigException e){
+ } catch (CacheConfigException e) {
log.error("ARPHandler cache configuration invalid!");
- } catch (CacheExistException e){
+ } catch (CacheExistException e) {
log.debug("ARPHandler cache exists, skipped allocation.");
}
}
- private void nonClusterObjectCreate(){
+ private void nonClusterObjectCreate() {
arpRequestReplyEvent = new ConcurrentHashMap<ARPEvent, Boolean>();
}
+
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
*
*/
void destroy() {
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
*
*/
void start() {
}
/**
- * Function called by the dependency manager before the services
- * exported by the component are unregistered, this will be
- * followed by a "destroy ()" calls
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
*
*/
- void stop(){
+ void stop() {
}
void stopping() {
handlePuntedIPPacket((IPv4) nextPak, inPkt.getIncomingNodeConnector());
} else if (nextPak instanceof ARP) {
log.trace("Handle ARP packet: {}", formattedPak);
- handleARPPacket((Ethernet) formattedPak, (ARP) nextPak, inPkt
- .getIncomingNodeConnector());
+ handleARPPacket((Ethernet) formattedPak, (ARP) nextPak, inPkt.getIncomingNodeConnector());
}
}
return PacketResult.IGNORED;
}
- private ARP createARP(short opCode, byte[] senderMacAddress, byte[] senderIP, byte[] targetMacAddress, byte[] targetIP) {
- ARP arp = new ARP();
- arp.setHardwareType(ARP.HW_TYPE_ETHERNET);
- arp.setProtocolType(EtherTypes.IPv4.shortValue());
- arp.setHardwareAddressLength((byte) 6);
- arp.setProtocolAddressLength((byte) 4);
- arp.setOpCode(opCode);
- arp.setSenderHardwareAddress(senderMacAddress) ;
- arp.setSenderProtocolAddress(senderIP);
- arp.setTargetHardwareAddress(targetMacAddress);
- arp.setTargetProtocolAddress(targetIP);
- return arp;
+ private ARP createARP(short opCode, byte[] senderMacAddress, byte[] senderIP, byte[] targetMacAddress,
+ byte[] targetIP) {
+ ARP arp = new ARP();
+ arp.setHardwareType(ARP.HW_TYPE_ETHERNET);
+ arp.setProtocolType(EtherTypes.IPv4.shortValue());
+ arp.setHardwareAddressLength((byte) 6);
+ arp.setProtocolAddressLength((byte) 4);
+ arp.setOpCode(opCode);
+ arp.setSenderHardwareAddress(senderMacAddress);
+ arp.setSenderProtocolAddress(senderIP);
+ arp.setTargetHardwareAddress(targetMacAddress);
+ arp.setTargetProtocolAddress(targetIP);
+ return arp;
}
private Ethernet createEthernet(byte[] sourceMAC, byte[] targetMAC, ARP arp) {
// Clean up ARP event cache
try {
- if (clusterContainerService.amICoordinator() && ! arpRequestReplyEvent.isEmpty()){
+ if (clusterContainerService.amICoordinator() && !arpRequestReplyEvent.isEmpty()) {
arpRequestReplyEvent.clear();
}
- } catch (Exception e){
+ } catch (Exception e) {
log.warn("ARPHandler: A cluster member failed to clear event cache.");
}
}
private void generateAndSendReply(InetAddress sourceIP, byte[] sourceMAC) {
if (log.isTraceEnabled()) {
log.trace("generateAndSendReply called with params sourceIP:{} sourceMAC:{}", sourceIP,
- HexEncode.bytesToHexString(sourceMAC));
+ HexEncode.bytesToHexString(sourceMAC));
}
Set<HostNodeConnector> hosts = arpRequestors.remove(sourceIP);
if ((hosts == null) || hosts.isEmpty()) {
countDownTimers.remove(sourceIP);
for (HostNodeConnector host : hosts) {
if (log.isTraceEnabled()) {
- log.trace("Sending ARP Reply with src {}/{}, target {}/{}",
- new Object[] {
- HexEncode.bytesToHexString(sourceMAC),
- sourceIP,
- HexEncode.bytesToHexString(host.getDataLayerAddressBytes()),
- host.getNetworkAddress() });
+ log.trace(
+ "Sending ARP Reply with src {}/{}, target {}/{}",
+ new Object[] { HexEncode.bytesToHexString(sourceMAC), sourceIP,
+ HexEncode.bytesToHexString(host.getDataLayerAddressBytes()), host.getNetworkAddress() });
}
- if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL){
- sendARPReply(host.getnodeConnector(),
- sourceMAC,
- sourceIP,
- host.getDataLayerAddressBytes(),
+ if (connectionManager.getLocalityStatus(host.getnodeconnectorNode()) == ConnectionLocality.LOCAL) {
+ sendARPReply(host.getnodeConnector(), sourceMAC, sourceIP, host.getDataLayerAddressBytes(),
host.getNetworkAddress());
} else {
/*
- * In the remote event a requestor moved to another
- * controller it may turn out it now we need to send
- * the ARP reply from a different controller, this
- * cover the case
+ * In the remote event a requestor moved to another controller
+ * it may turn out it now we need to send the ARP reply from a
+ * different controller, this cover the case
*/
arpRequestReplyEvent.put(
- new ARPReply(
- host.getnodeConnector(),
- sourceIP,
- sourceMAC,
- host.getNetworkAddress(),
- host.getDataLayerAddressBytes()), false);
+ new ARPReply(host.getnodeConnector(), sourceIP, sourceMAC, host.getNetworkAddress(), host
+ .getDataLayerAddressBytes()), false);
}
}
}
-
@Override
public void entryUpdated(ARPEvent key, Boolean new_value, String cacheName, boolean originLocal) {
log.trace("Got and entryUpdated for cacheName {} key {} isNew {}", cacheName, key, new_value);
public void entryCreated(ARPEvent key, String cacheName, boolean originLocal) {
// nothing to do
}
+
@Override
public void entryDeleted(ARPEvent key, String cacheName, boolean originLocal) {
// nothing to do
}
- private void enqueueARPCacheEvent (ARPEvent event, boolean new_value) {
+ private void enqueueARPCacheEvent(ARPEvent event, boolean new_value) {
try {
ARPCacheEvent cacheEvent = new ARPCacheEvent(event, new_value);
if (!ARPCacheEvents.contains(cacheEvent)) {
}
/*
- * this thread monitors the connectionEvent queue for new incoming events from
+ * this thread monitors the connectionEvent queue for new incoming events
+ * from
*/
private class ARPCacheEventHandler implements Runnable {
@Override
log.trace("Trigger and ARP Broadcast Request upon receipt of {}", req);
sendBcastARPRequest(req.getTargetIP(), req.getSubnet());
- //If unicast and local, send reply
+ // If unicast and local, send reply
} else if (connectionManager.getLocalityStatus(req.getHost().getnodeconnectorNode()) == ConnectionLocality.LOCAL) {
log.trace("ARPCacheEventHandler - sendUcatARPRequest upon receipt of {}", req);
sendUcastARPRequest(req.getHost(), req.getSubnet());
}
} else if (event instanceof ARPReply) {
ARPReply rep = (ARPReply) event;
- // New reply received by controller, notify all awaiting requestors across the cluster
+ // New reply received by controller, notify all awaiting
+ // requestors across the cluster
if (ev.isNewReply()) {
log.trace("Trigger a generateAndSendReply in response to {}", rep);
generateAndSendReply(rep.getTargetIP(), rep.getTargetMac());
- // Otherwise, a specific reply. If local, send out.
+ // Otherwise, a specific reply. If local, send out.
} else if (connectionManager.getLocalityStatus(rep.getPort().getNode()) == ConnectionLocality.LOCAL) {
log.trace("ARPCacheEventHandler - sendUcatARPReply locally in response to {}", rep);
- sendARPReply(rep.getPort(),
- rep.getSourceMac(),
- rep.getSourceIP(),
- rep.getTargetMac(),
+ sendARPReply(rep.getPort(), rep.getSourceMac(), rep.getSourceIP(), rep.getTargetMac(),
rep.getTargetIP());
}
}
<yangtools.binding.version>0.6.0-SNAPSHOT</yangtools.binding.version>
<!--versions for bits of the controller -->
<controller.version>0.4.1-SNAPSHOT</controller.version>
+ <hosttracker.version>0.5.1-SNAPSHOT</hosttracker.version>
+ <arphandler.version>0.5.1-SNAPSHOT</arphandler.version>
+ <forwarding.staticrouting>0.5.1-SNAPSHOT</forwarding.staticrouting>
+ <samples.loadbalancer>0.5.1-SNAPSHOT</samples.loadbalancer>
<config.version>0.2.3-SNAPSHOT</config.version>
<netconf.version>0.2.3-SNAPSHOT</netconf.version>
<mdsal.version>1.0-SNAPSHOT</mdsal.version>
the actual service-type which is actually required.";
mandatory true;
- type leafref {
- path "/config:services/config:service/config:type";
- }
+ type service-type-ref;
}
leaf name {
ModuleJMXRegistrator newModuleJMXRegistrator = baseJMXRegistrator
.createModuleJMXRegistrator();
+ OsgiRegistration osgiRegistration = null;
if (entry.hasOldModule()) {
ModuleInternalInfo oldInternalInfo = entry.getOldInternalInfo();
DynamicReadableWrapper oldReadableConfigBean = oldInternalInfo
currentConfig.remove(entry.getName());
// test if old instance == new instance
- if (oldReadableConfigBean.getInstance().equals(
- module.getInstance())) {
+ if (oldReadableConfigBean.getInstance().equals(module.getInstance())) {
// reused old instance:
// wrap in readable dynamic mbean
reusedInstances.add(primaryReadOnlyON);
+ osgiRegistration = oldInternalInfo.getOsgiRegistration();
} else {
// recreated instance:
// it is responsibility of module to call the old instance -
// we just need to unregister configbean
recreatedInstances.add(primaryReadOnlyON);
+
+ // close old osgi registration
+ oldInternalInfo.getOsgiRegistration().close();
}
- // close old osgi registration in any case
- oldInternalInfo.getOsgiRegistration().close();
+
// close old module jmx registrator
oldInternalInfo.getModuleJMXRegistrator().close();
} else {
}
// register to OSGi
- OsgiRegistration osgiRegistration = beanToOsgiServiceManager
- .registerToOsgi(module.getClass(),
- newReadableConfigBean.getInstance(),
- entry.getName());
+ if (osgiRegistration == null) {
+ osgiRegistration = beanToOsgiServiceManager.registerToOsgi(module.getClass(),
+ newReadableConfigBean.getInstance(), entry.getName());
+ }
RootRuntimeBeanRegistratorImpl runtimeBeanRegistrator = runtimeRegistrators
.get(entry.getName());
this.transactionModuleJMXRegistration = transactionModuleJMXRegistration;
}
+
+ /**
+ * Use {@link #getIdentifier()} instead.
+ */
+ @Deprecated
public ModuleIdentifier getName() {
return name;
}
maybeOldInternalInfo.getOrderingIdx());
}
- @Deprecated
+
public Module getModule() {
return module;
}
*/
package org.opendaylight.controller.config.yang.logback.config;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.commons.lang3.StringUtils;
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DependencyResolverFactory;
+import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.osgi.framework.BundleContext;
+import org.slf4j.LoggerFactory;
+
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.encoder.PatternLayoutEncoder;
import ch.qos.logback.core.rolling.FixedWindowRollingPolicy;
import ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy;
import ch.qos.logback.core.rolling.TimeBasedRollingPolicy;
+
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
-import org.apache.commons.lang3.StringUtils;
-import org.opendaylight.controller.config.api.DependencyResolver;
-import org.opendaylight.controller.config.api.ModuleIdentifier;
-import org.osgi.framework.BundleContext;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
+import com.google.common.collect.Sets;
/**
*
public class LogbackModuleFactory extends
org.opendaylight.controller.config.yang.logback.config.AbstractLogbackModuleFactory {
- private static final String INSTANCE_NAME = "singleton";
+ public static final String INSTANCE_NAME = "singleton";
private Map<String, LoggerTO> loggersDTOs;
private Map<String, RollingFileAppenderTO> rollingDTOs;
private Map<String, ConsoleAppenderTO> consoleDTOs;
private Map<String, FileAppenderTO> fileDTOs;
@Override
- public LogbackModule instantiateModule(String instanceName,
- DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ public LogbackModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
+ BundleContext bundleContext) {
Preconditions.checkArgument(instanceName.equals(INSTANCE_NAME),
"There should be just one instance of logback, named " + INSTANCE_NAME);
prepareDTOs();
}
@Override
- public LogbackModule instantiateModule(String instanceName,
- DependencyResolver dependencyResolver, LogbackModule oldModule,
- AutoCloseable oldInstance, BundleContext bundleContext) {
+ public LogbackModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
+ LogbackModule oldModule, AutoCloseable oldInstance, BundleContext bundleContext) {
Preconditions.checkArgument(instanceName.equals(INSTANCE_NAME),
"There should be just one instance of logback, named " + INSTANCE_NAME);
prepareDTOs();
return Lists.newArrayList(loggersToReturn.values());
}
+ @Override
+ public Set<LogbackModule> getDefaultModules(DependencyResolverFactory dependencyResolverFactory,
+ BundleContext bundleContext) {
+ DependencyResolver resolver = dependencyResolverFactory.createDependencyResolver(new ModuleIdentifier(
+ getImplementationName(), INSTANCE_NAME));
+ LogbackModule defaultLogback = instantiateModule(INSTANCE_NAME, resolver, bundleContext);
+ Set<LogbackModule> defaultModules = Sets.newHashSet(defaultLogback);
+ return defaultModules;
+ }
+
}
*/
package org.opendaylight.controller.config.yang.logback.config;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
-import static org.junit.matchers.JUnitMatchers.containsString;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import javax.management.InstanceAlreadyExistsException;
-import javax.management.InstanceNotFoundException;
-import javax.management.ObjectName;
-
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
-import org.opendaylight.controller.config.api.ConflictingVersionException;
import org.opendaylight.controller.config.api.ValidationException;
import org.opendaylight.controller.config.api.jmx.CommitStatus;
import org.opendaylight.controller.config.manager.impl.AbstractConfigTest;
import org.opendaylight.controller.config.manager.impl.factoriesresolver.HardcodedModuleFactoriesResolver;
import org.opendaylight.controller.config.util.ConfigTransactionJMXClient;
+import javax.management.ObjectName;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+import static org.junit.matchers.JUnitMatchers.containsString;
+
public class LogbackModuleTest extends AbstractConfigTest {
private static final String INSTANCE_NAME = "singleton";
private LogbackModuleFactory factory;
@Before
- public void setUp() throws IOException, ClassNotFoundException, InterruptedException {
+ public void setUp() throws Exception {
factory = new LogbackModuleFactory();
super.initConfigTransactionManagerImpl(new HardcodedModuleFactoriesResolver(factory));
}
@Test
- public void testCreateBean() throws InstanceAlreadyExistsException {
+ public void testCreateBean() throws Exception {
CommitStatus status = createBeans(true, "target/rollingApp",
"%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5, "target/%i.log", "rolling",
}
@Test
- public void testReusingInstance() throws InstanceAlreadyExistsException {
+ public void testReusingInstance() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0,
"FileAppender").commit();
}
@Test
- public void testRecreateInstance() throws InstanceAlreadyExistsException, ValidationException,
- ConflictingVersionException, InstanceNotFoundException {
+ public void testRecreateInstance() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0,
"FileAppender").commit();
}
@Test
- public void testDestroyInstance() throws InstanceNotFoundException, InstanceAlreadyExistsException {
+ public void testDestroyInstance() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0,
"FileAppender").commit();
@Ignore
@Test
- public void testValidation1() throws InstanceAlreadyExistsException {
+ public void testValidation1() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy",
}
@Test
- public void testValidation2() throws InstanceAlreadyExistsException {
+ public void testValidation2() throws Exception {
try {
createBeans(true, "target/rollingApp", null, "30MB", 1, 5, "target/%i.log", "rolling", "consoleName",
"ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0, "FileAppender").commit();
}
@Test
- public void testValidation4() throws InstanceAlreadyExistsException {
+ public void testValidation4() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", null, 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy",
}
@Test
- public void testValidation6() throws InstanceAlreadyExistsException {
+ public void testValidation6() throws Exception {
try {
createBeans(true, "", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5, "target/%i.log",
"rolling", "consoleName", "ALL", "logger1", "DEBUG", "FixedWindowRollingPolicy", 0, "FileAppender")
}
@Test
- public void testValidation7() throws InstanceAlreadyExistsException {
+ public void testValidation7() throws Exception {
try {
createBeans(
}
@Test
- public void testValidation8() throws InstanceAlreadyExistsException {
+ public void testValidation8() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", null, "DEBUG", "FixedWindowRollingPolicy", 0,
}
@Test
- public void testValidation9() throws InstanceAlreadyExistsException {
+ public void testValidation9() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1, 5,
"target/%i.log", "rolling", "consoleName", "ALL", "", "DEBUG", "FixedWindowRollingPolicy", 0,
}
@Test
- public void testValidation10() throws InstanceAlreadyExistsException {
+ public void testValidation10() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", null,
5, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG",
}
@Test
- public void testValidation11() throws InstanceAlreadyExistsException {
+ public void testValidation11() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG",
}
@Test
- public void testValidation12() throws InstanceAlreadyExistsException {
+ public void testValidation12() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", null, 1, "FileAppender")
}
@Test
- public void testValidation13() throws InstanceAlreadyExistsException {
+ public void testValidation13() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "", 1, "FileAppender")
}
@Test
- public void testValidation14() throws InstanceAlreadyExistsException {
+ public void testValidation14() throws Exception {
try {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", 1,
null, "target/%i.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "RollingPolicy", 1,
}
@Test
- public void testTimeBasedRollingPolicy() throws InstanceAlreadyExistsException {
+ public void testTimeBasedRollingPolicy() throws Exception {
createBeans(true, "target/rollingApp", "%-4relative [%thread] %-5level %logger{35} - %msg%n", "30MB", null,
null, "target/%d.log", "rolling", "consoleName", "ALL", "logger1", "DEBUG", "TimeBasedRollingPolicy",
1, "FileAppender").commit();
private ConfigTransactionJMXClient createBeans(Boolean isAppend, String rollingFileName, String encoderPattern,
String maxFileSize, Integer minIndex, Integer maxIndex, String fileNamePattern, String rollingName,
String consoleName, String thresholdFilter, String loggerName, String level, String rollingPolicyType,
- int maxHistory, String fileAppName) throws InstanceAlreadyExistsException {
+ int maxHistory, String fileAppName) throws Exception {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), INSTANCE_NAME);
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), INSTANCE_NAME);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
List<RollingFileAppenderTO> rollingAppenders = new ArrayList<>();
RollingFileAppenderTO rollingAppender = new RollingFileAppenderTO();
import java.util.List;
import javax.management.InstanceAlreadyExistsException;
+import javax.management.InstanceNotFoundException;
import javax.management.JMX;
+import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.commons.io.FileUtils;
}
- public ObjectName createBeans() throws JoranException, InstanceAlreadyExistsException, IOException {
+ public ObjectName createBeans() throws JoranException, InstanceAlreadyExistsException, IOException,
+ MalformedObjectNameException, InstanceNotFoundException {
LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
loggersDTOs.add(log);
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), "singleton");
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
bean.setLoggerTO(loggersDTOs);
bean.setRollingFileAppenderTO(rollingAppenders);
transaction.commit();
- return nameCreated;
+ return nameRetrieved;
}
}
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.JMX;
+import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import org.apache.commons.io.FileUtils;
import org.opendaylight.controller.config.util.ConfigTransactionJMXClient;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.Lists;
-
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.joran.JoranConfigurator;
import ch.qos.logback.core.joran.spi.JoranException;
+import com.google.common.collect.Lists;
+
public class LogbackWithXmlConfigModuleTest extends AbstractConfigTest {
private LogbackModuleFactory factory;
/**
* Tests configuration of Logger factory.
+ *
+ * @throws MalformedObjectNameException
*/
@Test
- public void test() throws InstanceAlreadyExistsException, InstanceNotFoundException {
+ public void test() throws InstanceAlreadyExistsException, InstanceNotFoundException, MalformedObjectNameException {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), "singleton");
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
assertEquals(1, bean.getConsoleAppenderTO().size());
transaction = configRegistryClient.createTransaction();
- nameCreated = transaction.lookupConfigBean(factory.getImplementationName(), "singleton");
+ nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), "singleton");
- bean = JMX.newMXBeanProxy(platformMBeanServer, nameCreated, LogbackModuleMXBean.class);
+ bean = JMX.newMXBeanProxy(platformMBeanServer, nameRetrieved, LogbackModuleMXBean.class);
assertEquals(1, bean.getConsoleAppenderTO().size());
assertEquals(1, bean.getRollingFileAppenderTO().size());
@Test
public void testAllLoggers() throws InstanceAlreadyExistsException, InstanceNotFoundException {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- transaction.createModule(factory.getImplementationName(), "singleton");
-
- transaction.commit();
-
- transaction = configRegistryClient.createTransaction();
LogbackModuleMXBean bean = JMX.newMXBeanProxy(ManagementFactory.getPlatformMBeanServer(),
transaction.lookupConfigBean("logback", "singleton"), LogbackModuleMXBean.class);
/**
* Add new logger using FileAppender
+ *
+ * @throws MalformedObjectNameException
*/
@Test
- public void testAddNewLogger() throws InstanceAlreadyExistsException, InstanceNotFoundException {
+ public void testAddNewLogger() throws InstanceAlreadyExistsException, InstanceNotFoundException,
+ MalformedObjectNameException {
ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
- ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), "singleton");
- LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameCreated, LogbackModuleMXBean.class);
+ ObjectName nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), LogbackModuleFactory.INSTANCE_NAME);
+ LogbackModuleMXBean bean = transaction.newMXBeanProxy(nameRetrieved, LogbackModuleMXBean.class);
assertEquals(5, bean.getLoggerTO().size());
transaction.commit();
transaction = configRegistryClient.createTransaction();
- nameCreated = transaction.lookupConfigBean(factory.getImplementationName(), "singleton");
- bean = JMX.newMXBeanProxy(platformMBeanServer, nameCreated, LogbackModuleMXBean.class);
+ nameRetrieved = transaction.lookupConfigBean(factory.getImplementationName(), "singleton");
+ bean = JMX.newMXBeanProxy(platformMBeanServer, nameRetrieved, LogbackModuleMXBean.class);
assertEquals(6, bean.getLoggerTO().size());
}
--- /dev/null
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-subsystem</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>netty-config-api</artifactId>
+ <name>${project.artifactId}</name>
+ <packaging>bundle</packaging>
+ <prerequisites>
+ <maven>3.0.4</maven>
+ </prerequisites>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport</artifactId>
+ </dependency>
+ </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <configuration>
+ <instructions>
+ <Import-Package>
+ org.opendaylight.controller.config.api.*,
+ io.netty.channel,
+ io.netty.util,
+ io.netty.util.concurrent
+ </Import-Package>
+ <Export-Package>
+ org.opendaylight.controller.config.yang.netty
+ </Export-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+</project>
\ No newline at end of file
--- /dev/null
+// vi: set smarttab et sw=4 tabstop=4:
+module netty {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:netty";
+ prefix "netty";
+
+ import config { prefix config; revision-date 2013-04-05; }
+
+ organization "Cisco Systems, Inc.";
+
+ contact "Milos Fabian <milfabia@cisco.com>";
+
+ description
+ "This module contains the base YANG definitions for
+ netty services.
+
+ Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Public License v1.0 which
+ accompanies this distribution, and is available at
+ http://www.eclipse.org/legal/epl-v10.html";
+
+ revision "2013-11-19" {
+ description
+ "Initial revision.";
+ }
+
+ identity netty-threadgroup {
+ description
+ "Configuration wrapper around netty's threadgroup";
+
+ base "config:service-type";
+ config:java-class "io.netty.channel.EventLoopGroup";
+ }
+
+ identity netty-event-executor {
+ description
+ "Configuration wrapper around netty's event executor";
+
+ base "config:service-type";
+ config:java-class "io.netty.util.concurrent.EventExecutor";
+ }
+
+ identity netty-timer {
+ description
+ "Configuration wrapper around netty's timer";
+
+ base "config:service-type";
+ config:java-class "io.netty.util.Timer";
+ }
+}
\ No newline at end of file
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-api</artifactId>
+ <artifactId>netty-config-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
<!--test dependencies -->
<dependency>
</Export-Package>
<Import-Package>
com.google.common.base,
- org.opendaylight.controller.config.yang.threadpool,
+ org.opendaylight.controller.config.yang.netty,
io.netty.util.concurrent,
org.opendaylight.controller.config.api,
org.opendaylight.controller.config.api.annotations,
@Override
public void validate() {
super.validate();
- // Add custom validation for module attributes here.
}
@Override
module netty-event-executor {
yang-version 1;
namespace "urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor";
- prefix "netty-t";
+ prefix "netty-ee";
import config { prefix config; revision-date 2013-04-05; }
- import threadpool { prefix th; revision-date 2013-04-09; }
+ import netty { prefix netty; revision-date 2013-11-19; }
organization "Cisco Systems, Inc.";
contact "Milos Fabian <milfabia@cisco.com>";
description
- "This module contains the base YANG definitions for NS-OS
- thread-related services.
+ "This module contains the base YANG definitions for
+ netty event executor implementation.
Copyright (c)2013 Cisco Systems, Inc. All rights reserved.;
identity netty-global-event-executor {
base config:module-type;
- config:provided-service th:netty-event-executor;
+ config:provided-service netty:netty-event-executor;
config:java-name-prefix GlobalEventExecutor;
}
}
}
-
-
}
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>threadpool-config-api</artifactId>
+ <artifactId>netty-config-api</artifactId>
<version>${project.version}</version>
</dependency>
-
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
-
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
-
<!--test dependencies -->
<dependency>
<groupId>junit</groupId>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <version>2.3.7</version>
<extensions>true</extensions>
<configuration>
<instructions>
<Import-Package>
com.google.common.base,
io.netty.channel.nio,
- org.opendaylight.controller.config.yang.threadpool,
+ org.opendaylight.controller.config.yang.netty,
io.netty.util.concurrent,
org.opendaylight.controller.config.api,
org.opendaylight.controller.config.api.annotations,
*/
package org.opendaylight.controller.config.yang.netty.threadgroup;
-import com.google.common.base.Preconditions;
import io.netty.channel.nio.NioEventLoopGroup;
+import org.opendaylight.controller.config.api.JmxAttributeValidationException;
+
/**
*
*/
@Override
public void validate(){
if(getThreadCount()!=null) {
- Preconditions.checkArgument(getThreadCount() > 0, "Thread count cannot be < 0");
+ JmxAttributeValidationException.checkCondition(getThreadCount() > 0, "value must be greater than 0",
+ threadCountJmxAttribute);
}
}
// vi: set smarttab et sw=4 tabstop=4:
-module nsos-threadpool {
+module threadgroup {
yang-version 1;
namespace "urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup";
- prefix "netty-t";
+ prefix "netty-th";
import config { prefix config; revision-date 2013-04-05; }
- import threadpool { prefix th; revision-date 2013-04-09; }
+ import netty { prefix netty; revision-date 2013-11-19; }
organization "Cisco Systems, Inc.";
contact "Robert Varga <rovarga@cisco.com>";
description
- "This module contains the base YANG definitions for NS-OS
- thread-related services.
+ "This module contains the base YANG definitions for
+ netty threadgroup implementation.
Copyright (c)2013 Cisco Systems, Inc. All rights reserved.";
identity netty-threadgroup-fixed {
base config:module-type;
- config:provided-service th:netty-threadgroup;
+ config:provided-service netty:netty-threadgroup;
config:java-name-prefix NettyThreadgroup;
}
--- /dev/null
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-subsystem</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>netty-timer-config</artifactId>
+ <description>Configuration Wrapper around netty's timer</description>
+ <packaging>bundle</packaging>
+ <name>${project.artifactId}</name>
+ <prerequisites>
+ <maven>3.0.4</maven>
+ </prerequisites>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-config-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+
+ <!--test dependencies -->
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-manager</artifactId>
+ <scope>test</scope>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-manager</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-util</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.bgpcep</groupId>
+ <artifactId>mockito-configuration</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-impl</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+ <Export-Package>
+ </Export-Package>
+ <Import-Package>
+ javax.management,
+ com.google.common.base,
+ org.opendaylight.controller.config.yang.netty,
+ org.opendaylight.controller.config.yang.threadpool,
+ io.netty.util,
+ org.opendaylight.controller.config.api,
+ org.opendaylight.controller.config.api.annotations,
+ org.opendaylight.controller.config.api.runtime,
+ org.opendaylight.controller.config.spi,
+ org.slf4j,
+ org.osgi.framework
+ </Import-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <distributionManagement>
+ <site>
+ <id>${project.artifactId}</id>
+ <name>NETTY-TIMER-CONFIG Module site</name>
+ <url>${basedir}/target/site/${project.artifactId}</url>
+ </site>
+ </distributionManagement>
+</project>
\ No newline at end of file
--- /dev/null
+/**
+ * Generated file
+
+ * Generated from: yang module name: netty-timer yang module local name: netty-hashed-wheel-timer
+ * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ * Generated at: Tue Nov 19 12:49:59 CET 2013
+ *
+ * Do not modify this file unless it is present under src/main directory
+ */
+package org.opendaylight.controller.config.yang.netty.timer;
+
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timeout;
+import io.netty.util.Timer;
+import io.netty.util.TimerTask;
+
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.config.api.JmxAttributeValidationException;
+
+/**
+ * Config-subsystem module wrapping netty's {@link HashedWheelTimer}.
+ * Exposes the optional tick-duration, ticks-per-wheel and thread-factory
+ * attributes declared in netty-timer.yang.
+ */
+public final class HashedWheelTimerModule extends
+    org.opendaylight.controller.config.yang.netty.timer.AbstractHashedWheelTimerModule {
+
+    public HashedWheelTimerModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+            org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+        super(identifier, dependencyResolver);
+    }
+
+    public HashedWheelTimerModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+            org.opendaylight.controller.config.api.DependencyResolver dependencyResolver,
+            HashedWheelTimerModule oldModule, java.lang.AutoCloseable oldInstance) {
+        super(identifier, dependencyResolver, oldModule, oldInstance);
+    }
+
+    @Override
+    public void validate() {
+        super.validate();
+        // Both attributes are optional; when present they must be positive.
+        if (getTickDuration() != null) {
+            JmxAttributeValidationException.checkCondition(getTickDuration() > 0, "value must be greater than 0",
+                    tickDurationJmxAttribute);
+        }
+        if (getTicksPerWheel() != null) {
+            JmxAttributeValidationException.checkCondition(getTicksPerWheel() > 0, "value must be greater than 0",
+                    ticksPerWheelJmxAttribute);
+        }
+    }
+
+    @Override
+    public java.lang.AutoCloseable createInstance() {
+        // Pick the HashedWheelTimer constructor matching the configured
+        // subset of (tick-duration, thread-factory, ticks-per-wheel).
+        TimeUnit unit = TimeUnit.MILLISECONDS;
+        if (getTickDuration() != null && getThreadFactoryDependency() == null && getTicksPerWheel() == null) {
+            return new HashedWheelTimerCloseable(new HashedWheelTimer(getTickDuration(), unit));
+        }
+        if (getTickDuration() != null && getThreadFactoryDependency() == null && getTicksPerWheel() != null) {
+            return new HashedWheelTimerCloseable(new HashedWheelTimer(getTickDuration(), unit, getTicksPerWheel()));
+        }
+        if (getTickDuration() == null && getThreadFactoryDependency() != null && getTicksPerWheel() == null) {
+            return new HashedWheelTimerCloseable(new HashedWheelTimer(getThreadFactoryDependency()));
+        }
+        if (getTickDuration() != null && getThreadFactoryDependency() != null && getTicksPerWheel() == null) {
+            return new HashedWheelTimerCloseable(new HashedWheelTimer(getThreadFactoryDependency(), getTickDuration(),
+                    unit));
+        }
+        if (getTickDuration() != null && getThreadFactoryDependency() != null && getTicksPerWheel() != null) {
+            return new HashedWheelTimerCloseable(new HashedWheelTimer(getThreadFactoryDependency(), getTickDuration(),
+                    unit, getTicksPerWheel()));
+        }
+        // NOTE(review): thread-factory + ticks-per-wheel without tick-duration
+        // falls through to here, silently ignoring both settings (netty has no
+        // matching constructor) -- confirm this combination is intended.
+        return new HashedWheelTimerCloseable(new HashedWheelTimer());
+    }
+
+    /**
+     * Adapts the wrapped {@link Timer} to the config subsystem's lifecycle:
+     * close() stops the timer, all Timer calls delegate to the wrapped one.
+     */
+    private static final class HashedWheelTimerCloseable implements AutoCloseable, Timer {
+
+        private final Timer timer;
+
+        public HashedWheelTimerCloseable(Timer timer) {
+            this.timer = timer;
+        }
+
+        @Override
+        public void close() throws Exception {
+            stop();
+        }
+
+        @Override
+        public Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) {
+            return this.timer.newTimeout(task, delay, unit);
+        }
+
+        @Override
+        public Set<Timeout> stop() {
+            return this.timer.stop();
+        }
+
+    }
+}
--- /dev/null
+/**
+ * Generated file
+
+ * Generated from: yang module name: netty-timer yang module local name: netty-hashed-wheel-timer
+ * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ * Generated at: Tue Nov 19 12:49:59 CET 2013
+ *
+ * Do not modify this file unless it is present under src/main directory
+ */
+package org.opendaylight.controller.config.yang.netty.timer;
+
+/**
+ * Factory producing {@link HashedWheelTimerModule} instances; all behaviour
+ * is inherited from the generated abstract factory.
+ */
+public class HashedWheelTimerModuleFactory extends
+    org.opendaylight.controller.config.yang.netty.timer.AbstractHashedWheelTimerModuleFactory {
+
+}
--- /dev/null
+// vi: set smarttab et sw=4 tabstop=4:
+module netty-timer {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:netty:timer";
+ prefix "netty-timer";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import netty { prefix netty; revision-date 2013-11-19; }
+ import threadpool { prefix th; revision-date 2013-04-09; }
+
+ organization "Cisco Systems, Inc.";
+
+ contact "Milos Fabian <milfabia@cisco.com>";
+
+ description
+ "This module contains the base YANG definitions for
+ netty timer implementation.
+
+ Copyright (c) 2013 Cisco Systems, Inc. All rights reserved.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Public License v1.0 which
+ accompanies this distribution, and is available at
+ http://www.eclipse.org/legal/epl-v10.html";
+
+ revision "2013-11-19" {
+ description
+ "Initial revision.";
+ }
+
+ identity netty-hashed-wheel-timer {
+ base config:module-type;
+ config:provided-service netty:netty-timer;
+ config:java-name-prefix HashedWheelTimer;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netty-hashed-wheel-timer {
+ when "/config:modules/config:module/config:type = 'netty-hashed-wheel-timer'";
+
+ leaf tick-duration {
+ type uint32;
+ }
+
+ leaf ticks-per-wheel {
+ type uint16;
+ }
+
+ container thread-factory {
+ uses config:service-ref {
+ refine type {
+ mandatory false;
+ config:required-identity th:threadfactory;
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+package org.opendaylight.controller.config.yang.netty.timer;
+
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.InstanceNotFoundException;
+import javax.management.ObjectName;
+
+import junit.framework.Assert;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.config.api.ConflictingVersionException;
+import org.opendaylight.controller.config.api.ValidationException;
+import org.opendaylight.controller.config.api.jmx.CommitStatus;
+import org.opendaylight.controller.config.manager.impl.AbstractConfigTest;
+import org.opendaylight.controller.config.manager.impl.factoriesresolver.HardcodedModuleFactoriesResolver;
+import org.opendaylight.controller.config.util.ConfigTransactionJMXClient;
+import org.opendaylight.controller.config.yang.threadpool.impl.NamingThreadFactoryModuleFactory;
+import org.opendaylight.controller.config.yang.threadpool.impl.NamingThreadFactoryModuleMXBean;
+
+/**
+ * Config-subsystem tests for HashedWheelTimerModule: attribute validation,
+ * instance creation, instance reuse and reconfiguration.
+ */
+public class HashedWheelTimerModuleTest extends AbstractConfigTest {
+
+    private HashedWheelTimerModuleFactory factory;
+    private NamingThreadFactoryModuleFactory threadFactory;
+    private final String instanceName = "hashed-wheel-timer1";
+
+    @Before
+    public void setUp() {
+        factory = new HashedWheelTimerModuleFactory();
+        threadFactory = new NamingThreadFactoryModuleFactory();
+        super.initConfigTransactionManagerImpl(new HardcodedModuleFactoriesResolver(factory, threadFactory));
+    }
+
+    /** tick-duration of 0 must be rejected by validate(). */
+    @Test
+    public void testValidationExceptionTickDuration() throws InstanceAlreadyExistsException {
+        ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
+        try {
+            createInstance(transaction, instanceName, 0L, 10, true);
+            transaction.validateConfig();
+            Assert.fail();
+        } catch (ValidationException e) {
+            Assert.assertTrue(e.getMessage().contains("TickDuration value must be greater than 0"));
+        }
+    }
+
+    /** ticks-per-wheel of 0 must be rejected by validate(). */
+    @Test
+    public void testValidationExceptionTicksPerWheel() throws InstanceAlreadyExistsException {
+        ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
+        try {
+            createInstance(transaction, instanceName, 500L, 0, true);
+            transaction.validateConfig();
+            Assert.fail();
+        } catch (ValidationException e) {
+            Assert.assertTrue(e.getMessage().contains("TicksPerWheel value must be greater than 0"));
+        }
+    }
+
+    @Test
+    public void testCreateBean() throws InstanceAlreadyExistsException, ValidationException,
+            ConflictingVersionException {
+        ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
+
+        createInstance(transaction, instanceName, 500L, 10, true);
+        createInstance(transaction, instanceName + 1, null, null, false);
+        createInstance(transaction, instanceName + 2, 500L, 10, false);
+        createInstance(transaction, instanceName + 3, 500L, null, false);
+        transaction.validateConfig();
+        CommitStatus status = transaction.commit();
+
+        // 4 timer modules + 1 thread-factory dependency = 5 newly created instances.
+        assertBeanCount(4, factory.getImplementationName());
+        assertStatus(status, 5, 0, 0);
+    }
+
+    @Test
+    public void testReusingOldInstance() throws InstanceAlreadyExistsException, ConflictingVersionException,
+            ValidationException {
+
+        ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
+        createInstance(transaction, instanceName, 500L, 10, true);
+
+        transaction.commit();
+
+        // An unchanged second commit must reuse both existing instances.
+        transaction = configRegistryClient.createTransaction();
+        assertBeanCount(1, factory.getImplementationName());
+        CommitStatus status = transaction.commit();
+
+        assertBeanCount(1, factory.getImplementationName());
+        assertStatus(status, 0, 0, 2);
+    }
+
+    @Test
+    public void testReconfigure() throws InstanceAlreadyExistsException, ConflictingVersionException,
+            ValidationException, InstanceNotFoundException {
+
+        ConfigTransactionJMXClient transaction = configRegistryClient.createTransaction();
+        createInstance(transaction, instanceName, 500L, 10, true);
+        transaction.commit();
+
+        // Changing ticks-per-wheel must recreate the timer and reuse the thread factory.
+        transaction = configRegistryClient.createTransaction();
+        assertBeanCount(1, factory.getImplementationName());
+        HashedWheelTimerModuleMXBean mxBean = transaction.newMBeanProxy(
+                transaction.lookupConfigBean(factory.getImplementationName(), instanceName),
+                HashedWheelTimerModuleMXBean.class);
+        mxBean.setTicksPerWheel(20);
+        CommitStatus status = transaction.commit();
+
+        assertBeanCount(1, factory.getImplementationName());
+        assertStatus(status, 0, 1, 1);
+    }
+
+    /** Creates a hashed-wheel-timer module with the given optional attributes. */
+    private ObjectName createInstance(ConfigTransactionJMXClient transaction, String instanceName,
+            final Long tickDuration, final Integer ticksPerWheel, final boolean hasThreadfactory)
+            throws InstanceAlreadyExistsException {
+        ObjectName nameCreated = transaction.createModule(factory.getImplementationName(), instanceName);
+        HashedWheelTimerModuleMXBean mxBean = transaction
+                .newMBeanProxy(nameCreated, HashedWheelTimerModuleMXBean.class);
+        mxBean.setTickDuration(tickDuration);
+        mxBean.setTicksPerWheel(ticksPerWheel);
+        if (hasThreadfactory) {
+            mxBean.setThreadFactory(createThreadfactoryInstance(transaction, "thread-factory1", "th"));
+        }
+        return nameCreated;
+    }
+
+    /** Creates a naming-thread-factory module to satisfy the timer's thread-factory dependency. */
+    private ObjectName createThreadfactoryInstance(ConfigTransactionJMXClient transaction, String instanceName,
+            final String namePrefix) throws InstanceAlreadyExistsException {
+        ObjectName nameCreated = transaction.createModule(threadFactory.getImplementationName(), instanceName);
+        NamingThreadFactoryModuleMXBean mxBean = transaction.newMBeanProxy(nameCreated,
+                NamingThreadFactoryModuleMXBean.class);
+        mxBean.setNamePrefix(namePrefix);
+        return nameCreated;
+    }
+
+}
<module>yang-test</module>
<module>logback-config</module>
<module>threadpool-config-api</module>
+ <module>netty-config-api</module>
<module>threadpool-config-impl</module>
<module>netty-threadgroup-config</module>
<module>netty-event-executor-config</module>
+ <module>netty-timer-config</module>
</modules>
<profiles>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-transport</artifactId>
- </dependency>
</dependencies>
<build>
<Import-Package>
org.opendaylight.controller.config.api.*,
com.google.common.eventbus,
- io.netty.channel,
- io.netty.util.concurrent
</Import-Package>
<Export-Package>
org.opendaylight.controller.config.threadpool,
base "threadpool";
config:java-class "org.opendaylight.controller.config.threadpool.ScheduledThreadPool";
}
-
-
- identity netty-threadgroup {
- description
- "Configuration wrapper around netty's threadgroup";
-
- base "config:service-type";
- config:java-class "io.netty.channel.EventLoopGroup";
- }
-
- identity netty-event-executor {
- description
- "Configuration wrapper around netty's event executor";
-
- base "config:service-type";
- config:java-class "io.netty.util.concurrent.EventExecutor";
- }
-
-
}
*/
package org.opendaylight.controller.config.yangjmxgenerator;
-import static com.google.common.base.Preconditions.checkNotNull;
-import static com.google.common.base.Preconditions.checkState;
-import static java.lang.String.format;
-import static org.opendaylight.controller.config.yangjmxgenerator.ConfigConstants.createConfigQName;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Sets;
import org.opendaylight.controller.config.yangjmxgenerator.attribute.AttributeIfc;
import org.opendaylight.controller.config.yangjmxgenerator.attribute.DependencyAttribute;
import org.opendaylight.controller.config.yangjmxgenerator.attribute.JavaAttribute;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Sets;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+import static java.lang.String.format;
+import static org.opendaylight.controller.config.yangjmxgenerator.ConfigConstants.createConfigQName;
/**
* Represents part of yang model that describes a module.
private final String nullableDescription, packageName, javaNamePrefix,
namespace;
- private final Map<String, String> providedServices;
+ private final Map<String, QName> providedServices;
private Collection<RuntimeBeanEntry> runtimeBeans;
public ModuleMXBeanEntry(IdentitySchemaNode id,
Map<String, AttributeIfc> yangToAttributes, String packageName,
- Map<String, String> providedServices2, String javaNamePrefix,
+ Map<String, QName> providedServices2, String javaNamePrefix,
String namespace, Collection<RuntimeBeanEntry> runtimeBeans) {
this.globallyUniqueName = id.getQName().getLocalName();
this.yangToAttributes = yangToAttributes;
return packageName;
}
- public Map<String, String> getProvidedServices() {
+ /**
+ * @return services implemented by this module. Keys are fully qualified java names of generated
+ * ServiceInterface classes, values are the QNames of the provided service identities.
+ */
+ public Map<String, QName> getProvidedServices() {
return providedServices;
}
checkState(moduleIdentity != null, "Cannot find identity "
+ moduleLocalNameFromXPath
+ " matching augmentation " + augmentation);
- Map<String, String> providedServices = findProvidedServices(
+ Map<String, QName> providedServices = findProvidedServices(
moduleIdentity, currentModule, qNamesToSIEs,
schemaContext);
return yangToAttributes;
}
- private static Map<String, String> findProvidedServices(
+ private static Map<String, QName> findProvidedServices(
IdentitySchemaNode moduleIdentity, Module currentModule,
Map<QName, ServiceInterfaceEntry> qNamesToSIEs,
SchemaContext schemaContext) {
- Map<String, String> result = new HashMap<>();
+ Map<String, QName> result = new HashMap<>();
for (UnknownSchemaNode unknownNode : moduleIdentity
.getUnknownSchemaNodes()) {
if (ConfigConstants.PROVIDED_SERVICE_EXTENSION_QNAME
.getNodeParameter();
ServiceInterfaceEntry sie = findSIE(prefixAndIdentityLocalName,
currentModule, qNamesToSIEs, schemaContext);
- result.put(sie.getFullyQualifiedName(), sie.getQName()
- .getLocalName());
+ result.put(sie.getFullyQualifiedName(), sie.getQName());
}
}
return result;
public interface YangStoreSnapshot extends AutoCloseable {
- Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap();
+ Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> getModuleMXBeanEntryMap();
Map<String, Entry<Module, String>> getModuleMap();
import org.opendaylight.controller.config.yang.store.api.YangStoreException;
import org.opendaylight.controller.config.yang.store.api.YangStoreService;
import org.opendaylight.controller.config.yang.store.api.YangStoreSnapshot;
+import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
+import org.opendaylight.yangtools.yang.model.api.Module;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleEvent;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
proposedNewState.putAll(bundle, addedURLs);
Preconditions.checkArgument(addedURLs.size() > 0, "No change can occur when no URLs are changed");
- boolean success;
- String failureReason = null;
+
try(YangStoreSnapshotImpl snapshot = createSnapshot(mbeParser, proposedNewState)) {
- updateCache(snapshot);
- success = true;
+ onSnapshotSuccess(proposedNewState, snapshot);
} catch(YangStoreException e) {
- failureReason = e.toString();
- success = false;
- }
- if (success){
- // consistent state
- // merge into
- consistentBundlesToYangURLs.clear();
- consistentBundlesToYangURLs.putAll(proposedNewState);
- inconsistentBundlesToYangURLs.clear();
-
- logger.info("Yang store updated to new consistent state containing {} yang files", consistentBundlesToYangURLs.size());
- logger.trace("Yang store updated to new consistent state containing {}", consistentBundlesToYangURLs);
- } else {
- // inconsistent state
- logger.debug("Yang store is falling back on last consistent state containing {}, inconsistent yang files {}, reason {}",
- consistentBundlesToYangURLs, inconsistentBundlesToYangURLs, failureReason);
- logger.warn("Yang store is falling back on last consistent state containing {} files, inconsistent yang files size is {}, reason {}",
- consistentBundlesToYangURLs.size(), inconsistentBundlesToYangURLs.size(), failureReason);
- inconsistentBundlesToYangURLs.putAll(bundle, addedURLs);
+ onSnapshotFailure(bundle, addedURLs, e);
}
}
}
return bundle;
}
- private void updateCache(YangStoreSnapshotImpl snapshot) {
+ private synchronized void onSnapshotFailure(Bundle bundle, List<URL> addedURLs, Exception failureReason) {
+ // inconsistent state
+ inconsistentBundlesToYangURLs.putAll(bundle, addedURLs);
+
+ logger.debug("Yang store is falling back on last consistent state containing {}, inconsistent yang files {}",
+ consistentBundlesToYangURLs, inconsistentBundlesToYangURLs, failureReason);
+ logger.warn("Yang store is falling back on last consistent state containing {} files, inconsistent yang files size is {}, reason {}",
+ consistentBundlesToYangURLs.size(), inconsistentBundlesToYangURLs.size(), failureReason.toString());
+ cache.setInconsistentURLsForReporting(inconsistentBundlesToYangURLs.values());
+ }
+
+ private synchronized void onSnapshotSuccess(Multimap<Bundle, URL> proposedNewState, YangStoreSnapshotImpl snapshot) {
+ // consistent state
+ // merge the proposed state into the consistent map, discarding prior inconsistent URLs
+ consistentBundlesToYangURLs.clear();
+ consistentBundlesToYangURLs.putAll(proposedNewState);
+ inconsistentBundlesToYangURLs.clear();
+
+ updateCache(snapshot);
+ cache.setInconsistentURLsForReporting(Collections.<URL> emptySet());
+ logger.info("Yang store updated to new consistent state containing {} yang files", consistentBundlesToYangURLs.size());
+ logger.debug("Yang store updated to new consistent state containing {}", consistentBundlesToYangURLs);
+ }
+
+ private synchronized void updateCache(YangStoreSnapshotImpl snapshot) {
cache.cacheYangStore(consistentBundlesToYangURLs, snapshot);
}
*/
@Override
public synchronized void removedBundle(Bundle bundle, BundleEvent event, Object object) {
+ logger.debug("Removed bundle {} {} {}", bundle, event, object);
inconsistentBundlesToYangURLs.removeAll(bundle);
consistentBundlesToYangURLs.removeAll(bundle);
}
throws YangStoreException {
Optional<YangStoreSnapshot> yangStoreOpt = cache.getSnapshotIfPossible(consistentBundlesToYangURLs);
if (yangStoreOpt.isPresent()) {
- logger.trace("Returning cached yang store {}", yangStoreOpt.get());
+ logger.debug("Returning cached yang store {}", yangStoreOpt.get());
return yangStoreOpt.get();
}
+
YangStoreSnapshotImpl snapshot = createSnapshot(mbeParser, consistentBundlesToYangURLs);
updateCache(snapshot);
return snapshot;
}
class YangStoreCache {
+ private static final Logger logger = LoggerFactory.getLogger(YangStoreCache.class);
+
+ @GuardedBy("this")
+ private Set<URL> cachedUrls = null;
@GuardedBy("this")
- private Set<URL> cachedUrls = Collections.emptySet();
+ private Optional<YangStoreSnapshot> cachedYangStoreSnapshot = getInitialSnapshot();
@GuardedBy("this")
- private Optional<YangStoreSnapshotImpl> cachedYangStoreSnapshot = Optional.absent();
+ private Collection<URL> inconsistentURLsForReporting = Collections.emptySet();
synchronized Optional<YangStoreSnapshot> getSnapshotIfPossible(Multimap<Bundle, URL> bundlesToYangURLs) {
Set<URL> urls = setFromMultimapValues(bundlesToYangURLs);
- if (cachedUrls != null && cachedUrls.equals(urls)) {
+
+ if (cachedUrls==null || cachedUrls.equals(urls)) {
Preconditions.checkState(cachedYangStoreSnapshot.isPresent());
YangStoreSnapshot freshSnapshot = new YangStoreSnapshotImpl(cachedYangStoreSnapshot.get());
+ if (inconsistentURLsForReporting.size() > 0){
+ logger.warn("Some yang URLs are ignored: {}", inconsistentURLsForReporting);
+ }
return Optional.of(freshSnapshot);
}
+
return Optional.absent();
}
}
synchronized void cacheYangStore(Multimap<Bundle, URL> urls,
- YangStoreSnapshotImpl yangStoreSnapshot) {
+ YangStoreSnapshot yangStoreSnapshot) {
this.cachedUrls = setFromMultimapValues(urls);
this.cachedYangStoreSnapshot = Optional.of(yangStoreSnapshot);
}
cachedYangStoreSnapshot = Optional.absent();
}
}
+
+ public synchronized void setInconsistentURLsForReporting(Collection<URL> urls){
+ inconsistentURLsForReporting = urls;
+ }
+
+ private Optional<YangStoreSnapshot> getInitialSnapshot() {
+ YangStoreSnapshot initialSnapshot = new YangStoreSnapshot() {
+ @Override
+ public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
+ return Collections.emptyMap();
+ }
+
+ @Override
+ public Map<String, Map.Entry<Module, String>> getModuleMap() {
+ return Collections.emptyMap();
+ }
+
+ @Override
+ public int countModuleMXBeanEntries() {
+ return 0;
+ }
+
+ @Override
+ public void close() {
+ }
+ };
+ return Optional.of(initialSnapshot);
+ }
}
this.moduleMap = Collections.unmodifiableMap(moduleMap);
}
- public YangStoreSnapshotImpl(YangStoreSnapshotImpl yangStoreSnapshot) {
- this.moduleMXBeanEntryMap = yangStoreSnapshot.moduleMXBeanEntryMap;
- this.moduleMap = yangStoreSnapshot.moduleMap;
+ public YangStoreSnapshotImpl(YangStoreSnapshot yangStoreSnapshot) {
+ this.moduleMXBeanEntryMap = yangStoreSnapshot.getModuleMXBeanEntryMap();
+ this.moduleMap = yangStoreSnapshot.getModuleMap();
}
+ /**
+ * @return all loaded config modules. Key of outer map is namespace of yang file.
+ * Key of inner map is name of module entry. Value is module entry.
+ */
@Override
public Map<String, Map<String, ModuleMXBeanEntry>> getModuleMXBeanEntryMap() {
return moduleMXBeanEntryMap;
import com.google.common.base.Optional;
import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.config.yang.store.api.YangStoreException;
import org.opendaylight.controller.config.yang.store.api.YangStoreSnapshot;
+import org.opendaylight.yangtools.yang.model.api.Module;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleListener;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
+import java.util.Map;
import java.util.regex.Pattern;
import static org.junit.Assert.assertEquals;
@Mock
private BundleContext bundleContext;
+ private Map<String, Map.Entry<Module, String>> moduleMap = Maps.newHashMap();
+
@Before
public void setUp() throws YangStoreException {
+
+ moduleMap.put("1", new Map.Entry<Module, String>() {
+ @Override
+ public Module getKey() {
+ return mock(Module.class);
+ }
+
+ @Override
+ public String getValue() {
+ return "v";
+ }
+
+ @Override
+ public String setValue(String value) {
+ return "v";
+ }
+ });
+
MockitoAnnotations.initMocks(this);
doNothing().when(bundleContext).addBundleListener(any(BundleListener.class));
doReturn(new Bundle[0]).when(bundleContext).getBundles();
doReturn(22).when(yangStoreSnapshot).countModuleMXBeanEntries();
doReturn("mock yang store").when(yangStoreSnapshot).toString();
doNothing().when(yangStoreSnapshot).close();
- doReturn(Collections.emptyMap()).when(yangStoreSnapshot).getModuleMap();
+ doReturn(moduleMap).when(yangStoreSnapshot).getModuleMap();
+ doReturn(Collections.emptyMap()).when(yangStoreSnapshot).getModuleMXBeanEntryMap();
}
@Test
returnedStore = tested.getYangStoreSnapshot();
- assertEquals(yangStoreSnapshot, returnedStore);
+ assertEquals(yangStoreSnapshot.getModuleMap(), returnedStore.getModuleMap());
tested.removedBundle(bundle, null, null);
tested.getYangStoreSnapshot();
tested.getYangStoreSnapshot();
}
- verify(parser, times(7)).parseYangFiles(anyCollectionOf(InputStream.class));
+ verify(parser, times(5)).parseYangFiles(anyCollectionOf(InputStream.class));
returnedStore = tested.getYangStoreSnapshot();
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
//this.unidirectional = false;
}
+ public ContainerFlowConfig(String name, String dlVlan, String srcIP, String dstIP, String proto, String srcPort,
+ String dstPort) {
+ this.name = name;
+ this.dlVlan = dlVlan;
+ this.nwSrc = srcIP;
+ this.nwDst = dstIP;
+ this.protocol = proto;
+ this.tpSrc = srcPort;
+ this.tpDst = dstPort;
+ }
+
public ContainerFlowConfig(ContainerFlowConfig containerFlowConfig) {
this.name = containerFlowConfig.name;
<artifactId>threadpool-config-api</artifactId>
<version>${config.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-config-api</artifactId>
+ <version>${config.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>threadpool-config-impl</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>netty-event-executor-config</artifactId>
+ <artifactId>netty-threadgroup-config</artifactId>
<version>${config.version}</version>
</dependency>
<dependency>
<artifactId>netty-event-executor-config</artifactId>
<version>${config.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-timer-config</artifactId>
+ <version>${config.version}</version>
+ </dependency>
<!-- toaster example I'm pretty sure we should trim -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>${controller.version}</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>arphandler</artifactId>
- <version>${controller.version}</version>
+ <version>${arphandler.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>${controller.version}</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>${controller.version}</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>samples.loadbalancer</artifactId>
- <version>${controller.version}</version>
+ <version>${samples.loadbalancer}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
#netconf.tls.keystore=
#netconf.tls.keystore.password=
-netconf.config.persister.storageAdapterClass=org.opendaylight.controller.netconf.persist.impl.NoOpStorageAdapter
+netconf.config.persister.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.FileStorageAdapter
+fileStorage=configuration/controller.config
+numberOfBackups=1
yangstore.blacklist=.*controller.model.*
# Set Default start level for framework
# Logging configuration for Tomcat-JUL logging
java.util.logging.config.file=configuration/tomcat-logging.properties
+
+#Hosttracker hostsdb key scheme setting
+hosttracker.keyscheme=IP
--- /dev/null
+//START OF CONFIG-LAST
+<data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+<modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:schema-service-singleton</type>
+ <name>yang-schema-service</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:hash-map-data-store</type>
+ <name>hash-map-data-store</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-broker-impl</type>
+ <name>dom-broker</name>
+ <data-store xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
+ <name>ref_hash-map-data-store</name>
+ </data-store>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-broker-impl</type>
+ <name>binding-broker-impl</name>
+ <notification-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
+ <name>ref_binding-notification-broker</name>
+ </notification-service>
+ <data-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
+ <name>ref_binding-data-broker</name>
+ </data-broker>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:runtime-generated-mapping</type>
+ <name>runtime-mapping-singleton</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-notification-broker</type>
+ <name>binding-notification-broker</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-data-broker</type>
+ <name>binding-data-broker</name>
+ <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
+ <name>ref_dom-broker</name>
+ </dom-broker>
+ <mapping-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
+ <name>ref_runtime-mapping-singleton</name>
+ </mapping-service>
+ </module>
+</modules>
+<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
+ <instance>
+ <name>ref_yang-schema-service</name>
+ <provider>/config/modules/module[name='schema-service-singleton']/instance[name='yang-schema-service']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
+ <instance>
+ <name>ref_binding-notification-broker</name>
+ <provider>/config/modules/module[name='binding-notification-broker']/instance[name='binding-notification-broker']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
+ <instance>
+ <name>ref_hash-map-data-store</name>
+ <provider>/config/modules/module[name='hash-map-data-store']/instance[name='hash-map-data-store']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-broker-osgi-registry</type>
+ <instance>
+ <name>ref_binding-broker-impl</name>
+ <provider>/config/modules/module[name='binding-broker-impl']/instance[name='binding-broker-impl']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-dom-mapping-service</type>
+ <instance>
+ <name>ref_runtime-mapping-singleton</name>
+ <provider>/config/modules/module[name='runtime-generated-mapping']/instance[name='runtime-mapping-singleton']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
+ <instance>
+ <name>ref_dom-broker</name>
+ <provider>/config/modules/module[name='dom-broker-impl']/instance[name='dom-broker']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
+ <instance>
+ <name>ref_binding-data-broker</name>
+ <provider>/config/modules/module[name='binding-data-broker']/instance[name='binding-data-broker']</provider>
+ </instance>
+ </service>
+</services>
+</data>
+
+//END OF SNAPSHOT
+urn:opendaylight:l2:types?module=opendaylight-l2-types&revision=2013-08-27
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&revision=2013-10-28
+urn:opendaylight:params:xml:ns:yang:controller:threadpool?module=threadpool&revision=2013-04-09
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom?module=opendaylight-md-sal-dom&revision=2013-10-28
+urn:opendaylight:params:xml:ns:yang:controller:config?module=config&revision=2013-04-05
+urn:ietf:params:netconf:capability:candidate:1.0
+urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring?module=ietf-netconf-monitoring&revision=2010-10-04
+urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor?module=netty-event-executor&revision=2013-11-12
+urn:ietf:params:xml:ns:yang:rpc-context?module=rpc-context&revision=2013-06-17
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl?module=opendaylight-sal-binding-broker-impl&revision=2013-10-28
+urn:ietf:params:xml:ns:yang:ietf-inet-types?module=ietf-inet-types&revision=2010-09-24
+urn:ietf:params:netconf:capability:rollback-on-error:1.0
+urn:ietf:params:xml:ns:yang:ietf-yang-types?module=ietf-yang-types&revision=2010-09-24
+urn:opendaylight:params:xml:ns:yang:controller:threadpool:impl?module=threadpool-impl&revision=2013-04-05
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl?module=opendaylight-sal-dom-broker-impl&revision=2013-10-28
+urn:opendaylight:params:xml:ns:yang:controller:logback:config?module=config-logging&revision=2013-07-16
+urn:opendaylight:yang:extension:yang-ext?module=yang-ext&revision=2013-07-09
+urn:opendaylight:params:xml:ns:yang:iana?module=iana&revision=2013-08-16
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:common?module=opendaylight-md-sal-common&revision=2013-10-28
+urn:opendaylight:params:xml:ns:yang:ieee754?module=ieee754&revision=2013-08-19
+//END OF CONFIG
<logger name="org.opendaylight.controller.sal.implementation" level="INFO"/>
<logger name="org.opendaylight.controller.sal.implementation.internal.Inventory" level="INFO"/>
<logger name="org.opendaylight.controller.sal.implementation.internal.Topology" level="INFO"/>
+ <!-- zeromq router and zeromq routing table -->
+ <logger name="org.opendaylight.controller.sal.connector.remoterpc" level="INFO" />
<!-- Functional Modules -->
<logger name="org.opendaylight.controller.arphandler" level="INFO"/>
<logger name="org.opendaylight.controller.hosttracker" level="INFO"/>
<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
- <version>1.4.1-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.4.1-SNAPSHOT</version>
+ <relativePath>../../commons/opendaylight</relativePath>
+ </parent>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
+ <tag>HEAD</tag>
+ </scm>
- <artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- <packaging>bundle</packaging>
+ <artifactId>forwarding.staticrouting</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ <packaging>bundle</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Import-Package>
- org.opendaylight.controller.sal.utils,
- org.opendaylight.controller.sal.core,
- org.opendaylight.controller.configuration,
- org.opendaylight.controller.forwardingrulesmanager,
- org.opendaylight.controller.hosttracker,
- org.opendaylight.controller.hosttracker.hostAware,
- org.opendaylight.controller.clustering.services,
- org.opendaylight.controller.sal.packet,
- org.opendaylight.controller.sal.routing,
- org.opendaylight.controller.topologymanager,
- org.eclipse.osgi.framework.console,
- org.osgi.framework,
- org.slf4j,
- org.apache.felix.dm,
- org.apache.commons.lang3.builder
- </Import-Package>
- <Export-Package>
- org.opendaylight.controller.forwarding.staticrouting
- </Export-Package>
- <Bundle-Activator>
- org.opendaylight.controller.forwarding.staticrouting.internal.Activator
- </Bundle-Activator>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topologymanager</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwardingrulesmanager</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>configuration</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>0.5.1-SNAPSHOT</version>
- </dependency>
- </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Import-Package>
+ org.opendaylight.controller.sal.packet.address,
+ org.opendaylight.controller.sal.utils,
+ org.opendaylight.controller.sal.core,
+ org.opendaylight.controller.configuration,
+ org.opendaylight.controller.forwardingrulesmanager,
+ org.opendaylight.controller.hosttracker,
+ org.opendaylight.controller.hosttracker.hostAware,
+ org.opendaylight.controller.clustering.services,
+ org.opendaylight.controller.sal.packet,
+ org.opendaylight.controller.sal.routing,
+ org.opendaylight.controller.topologymanager,
+ org.eclipse.osgi.framework.console,
+ org.osgi.framework,
+ org.slf4j,
+ org.apache.felix.dm,
+ org.apache.commons.lang3.builder
+ </Import-Package>
+ <Export-Package>
+ org.opendaylight.controller.forwarding.staticrouting
+ </Export-Package>
+ <Bundle-Activator>
+ org.opendaylight.controller.forwarding.staticrouting.internal.Activator
+ </Bundle-Activator>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>topologymanager</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>forwardingrulesmanager</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>hosttracker</artifactId>
+ <version>${hosttracker.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>configuration</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ </dependency>
+ </dependencies>
</project>
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.forwarding.staticrouting.IStaticRoutingAware;
import org.opendaylight.controller.forwarding.staticrouting.StaticRoute;
import org.opendaylight.controller.forwarding.staticrouting.StaticRouteConfig;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.IfNewHostNotify;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
/**
* Static Routing feature provides the bridge between SDN and Non-SDN networks.
*/
-public class StaticRoutingImplementation implements IfNewHostNotify,
- IForwardingStaticRouting, IObjectReader, IConfigurationContainerAware {
- private static Logger log = LoggerFactory
- .getLogger(StaticRoutingImplementation.class);
+public class StaticRoutingImplementation implements IfNewHostNotify, IForwardingStaticRouting, IObjectReader,
+ IConfigurationContainerAware {
+ private static Logger log = LoggerFactory.getLogger(StaticRoutingImplementation.class);
private static String ROOT = GlobalConstants.STARTUPHOME.toString();
ConcurrentMap<String, StaticRoute> staticRoutes;
ConcurrentMap<String, StaticRouteConfig> staticRouteConfigs;
}
@Override
- public Object readObject(ObjectInputStream ois)
- throws FileNotFoundException, IOException, ClassNotFoundException {
+ public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
// Perform the class deserialization locally, from inside the package
// where the class is defined
return ois.readObject();
@SuppressWarnings("unchecked")
private void loadConfiguration() {
ObjectReader objReader = new ObjectReader();
- ConcurrentMap<String, StaticRouteConfig> confList = (ConcurrentMap<String, StaticRouteConfig>) objReader
- .read(this, staticRoutesFileName);
+ ConcurrentMap<String, StaticRouteConfig> confList = (ConcurrentMap<String, StaticRouteConfig>) objReader.read(
+ this, staticRoutesFileName);
if (confList == null) {
return;
}
}
-
private Status saveConfig() {
return saveConfigInternal();
}
Status status;
ObjectWriter objWriter = new ObjectWriter();
- status = objWriter.write(
- new ConcurrentHashMap<String, StaticRouteConfig>(
- staticRouteConfigs), staticRoutesFileName);
+ status = objWriter.write(new ConcurrentHashMap<String, StaticRouteConfig>(staticRouteConfigs),
+ staticRoutesFileName);
if (status.isSuccess()) {
return status;
}
@SuppressWarnings("deprecation")
- private void allocateCaches() {
+ private void allocateCaches() {
if (this.clusterContainerService == null) {
- log
- .info("un-initialized clusterContainerService, can't create cache");
+ log.info("un-initialized clusterContainerService, can't create cache");
return;
}
try {
- clusterContainerService.createCache(
- "forwarding.staticrouting.routes", EnumSet
- .of(IClusterServices.cacheMode.TRANSACTIONAL));
- clusterContainerService.createCache(
- "forwarding.staticrouting.configs", EnumSet
- .of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("forwarding.staticrouting.routes",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("forwarding.staticrouting.configs",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
} catch (CacheExistException cee) {
- log
- .error("\nCache already exists - destroy and recreate if needed");
+ log.error("\nCache already exists - destroy and recreate if needed");
} catch (CacheConfigException cce) {
log.error("\nCache configuration invalid - check cache mode");
}
@SuppressWarnings({ "unchecked", "deprecation" })
private void retrieveCaches() {
if (this.clusterContainerService == null) {
- log
- .info("un-initialized clusterContainerService, can't retrieve cache");
+ log.info("un-initialized clusterContainerService, can't retrieve cache");
return;
}
try {
ra.staticRouteUpdate(s, update);
} catch (Exception e) {
- log.error("",e);
+ log.error("", e);
}
}
}
@Override
public Object call() throws Exception {
- if (!added
- || (staticRoute.getType() == StaticRoute.NextHopType.SWITCHPORT)) {
+ if (!added || (staticRoute.getType() == StaticRoute.NextHopType.SWITCHPORT)) {
notifyStaticRouteUpdate(staticRoute, added);
} else {
InetAddress nh = staticRoute.getNextHopAddress();
- HostNodeConnector host = hostTracker.hostQuery(nh);
+ // HostTracker hosts db key scheme implementation
+ IHostId id = HostIdFactory.create(nh, null);
+ HostNodeConnector host = hostTracker.hostQuery(id);
if (host == null) {
log.debug("Next hop {} is not present, try to discover it", nh.getHostAddress());
- Future<HostNodeConnector> future = hostTracker.discoverHost(nh);
+ Future<HostNodeConnector> future = hostTracker.discoverHost(id);
if (future != null) {
try {
host = future.get();
public StaticRoute getBestMatchStaticRoute(InetAddress ipAddress) {
ByteBuffer bblongestPrefix = null;
try {
- bblongestPrefix = ByteBuffer.wrap(InetAddress.getByName("0.0.0.0")
- .getAddress());
+ bblongestPrefix = ByteBuffer.wrap(InetAddress.getByName("0.0.0.0").getAddress());
} catch (Exception e) {
return null;
}
return status;
}
if (staticRouteConfigs.get(config.getName()) != null) {
- return new Status(StatusCode.CONFLICT,
- "A valid Static Route configuration with this name " +
- "already exists. Please use a different name");
+ return new Status(StatusCode.CONFLICT, "A valid Static Route configuration with this name "
+ + "already exists. Please use a different name");
}
// Update database
for (Map.Entry<String, StaticRoute> entry : staticRoutes.entrySet()) {
if (entry.getValue().compareTo(sRoute) == 0) {
- return new Status(StatusCode.CONFLICT,
- "This conflicts with an existing Static Route " +
- "Configuration. Please check the configuration " +
- "and try again");
+ return new Status(StatusCode.CONFLICT, "This conflicts with an existing Static Route "
+ + "Configuration. Please check the configuration " + "and try again");
}
}
staticRoutes.put(config.getName(), sRoute);
checkAndUpdateListeners(name, sRoute, false);
return new Status(StatusCode.SUCCESS, null);
}
- return new Status(StatusCode.NOTFOUND,
- "Static Route with name " + name + " is not found");
+ return new Status(StatusCode.NOTFOUND, "Static Route with name " + name + " is not found");
}
void setClusterContainerService(IClusterContainerServices s) {
containerName = "";
}
- staticRoutesFileName = ROOT + "staticRouting_" + containerName
- + ".conf";
+ staticRoutesFileName = ROOT + "staticRouting_" + containerName + ".conf";
- log.debug("forwarding.staticrouting starting on container {}",
- containerName);
+ log.debug("forwarding.staticrouting starting on container {}", containerName);
allocateCaches();
retrieveCaches();
this.executor = Executors.newFixedThreadPool(1);
}
/*
- * Slow probe to identify any gateway that might have silently appeared
- * after the Static Routing Configuration.
+ * Slow probe to identify any gateway that might have silently appeared
+ * after the Static Routing Configuration.
*/
gatewayProbeTimer = new Timer();
gatewayProbeTimer.schedule(new TimerTask() {
public void run() {
for (Map.Entry<String, StaticRoute> s : staticRoutes.entrySet()) {
StaticRoute route = s.getValue();
- if ((route.getType() == StaticRoute.NextHopType.IPADDRESS)
- && route.getHost() == null) {
+ if ((route.getType() == StaticRoute.NextHopType.IPADDRESS) && route.getHost() == null) {
checkAndUpdateListeners(s.getKey(), route, true);
}
}
}
}, 60 * 1000, 60 * 1000);
+
}
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
*
*/
void destroy() {
- log.debug("Destroy all the Static Routing Rules given we are "
- + "shutting down");
+ log.debug("Destroy all the Static Routing Rules given we are " + "shutting down");
gatewayProbeTimer.cancel();
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
*
*/
void start() {
}
/**
- * Function called by the dependency manager before the services
- * exported by the component are unregistered, this will be
- * followed by a "destroy ()" calls
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * call
*
*/
void stop() {
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.opendaylight</artifactId>
- <version>1.4.1-SNAPSHOT</version>
- <relativePath>../../commons/opendaylight</relativePath>
- </parent>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
- <tag>HEAD</tag>
- </scm>
- <artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- <packaging>bundle</packaging>
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.4.1-SNAPSHOT</version>
+ <relativePath>../../commons/opendaylight</relativePath>
+ </parent>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
+ <tag>HEAD</tag>
+ </scm>
+ <artifactId>hosttracker</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ <packaging>bundle</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Export-Package>
- org.opendaylight.controller.hosttracker,
- org.opendaylight.controller.hosttracker.hostAware
- </Export-Package>
- <Import-Package>
- org.opendaylight.controller.sal.core,
- org.opendaylight.controller.sal.utils,
- org.opendaylight.controller.topologymanager,
- org.opendaylight.controller.sal.packet.address,
- org.opendaylight.controller.switchmanager,
- org.opendaylight.controller.clustering.services,
- javax.xml.bind.annotation,
- javax.xml.bind,
- org.apache.felix.dm,
- org.apache.commons.lang3.builder,
- org.osgi.service.component,
- org.slf4j,
- org.eclipse.osgi.framework.console,
- org.osgi.framework
- </Import-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- </plugins>
- </build>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topologymanager</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>0.5.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- </dependency>
- </dependencies>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>${bundle.plugin.version}</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Export-Package>
+ org.opendaylight.controller.hosttracker,
+ org.opendaylight.controller.hosttracker.hostAware
+ </Export-Package>
+ <Import-Package>
+ org.opendaylight.controller.sal.core,
+ org.opendaylight.controller.sal.utils,
+ org.opendaylight.controller.topologymanager,
+ org.opendaylight.controller.sal.packet.address,
+ org.opendaylight.controller.switchmanager,
+ org.opendaylight.controller.clustering.services,
+ javax.xml.bind.annotation,
+ javax.xml.bind,
+ org.apache.felix.dm,
+ org.apache.commons.lang3.builder,
+ org.osgi.service.component,
+ org.slf4j,
+ org.eclipse.osgi.framework.console,
+ org.osgi.framework
+ </Import-Package>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>topologymanager</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>switchmanager</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>clustering.services</artifactId>
+ <version>0.4.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal</artifactId>
+ <version>0.5.1-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ </dependencies>
</project>
+
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.net.InetAddress;
+
+import org.opendaylight.controller.sal.packet.address.DataLinkAddress;
+
+/**
+ * Factory that builds the key used for hosts DB storage in hosttracker,
+ * according to the key scheme chosen via the "hosttracker.keyscheme" system
+ * property (typically set in config.ini).
+ *
+ * Supported schemes: "IP" (the default) and "IP+MAC".
+ *
+ * @author Deepak Udapudi
+ */
+public class HostIdFactory {
+    /** Key scheme: hosts are keyed by IP address only (the default). */
+    public static final String DEFAULT_IP_KEY_SCHEME = "IP";
+    /** Key scheme: hosts are keyed by the IP + MAC pair. */
+    public static final String IP_MAC_KEY_SCHEME = "IP+MAC";
+
+    /** Scheme read once at class load; null when the property is not set. */
+    private static final String scheme = System.getProperty("hosttracker.keyscheme");
+
+    /**
+     * @return the configured key scheme, or null if none was configured
+     */
+    public static String getScheme() {
+        return scheme;
+    }
+
+    /**
+     * Creates a host key for the given addresses using the configured scheme.
+     *
+     * @param ip
+     *            the host's IP address
+     * @param mac
+     *            the host's data-link (MAC) address; only used by the
+     *            "IP+MAC" scheme. NOTE(review): some callers pass null here
+     *            (e.g. static routing); under the "IP+MAC" scheme that yields
+     *            a key with a null MAC which will not match entries keyed
+     *            with a real MAC — confirm this is intended.
+     * @return an IP+MAC key when the "IP+MAC" scheme is configured, otherwise
+     *         an IP-only key
+     */
+    public static IHostId create(InetAddress ip, DataLinkAddress mac) {
+        if (IP_MAC_KEY_SCHEME.equals(scheme)) {
+            return new IPMacHostId(ip, mac);
+        }
+        // An unset or unrecognized scheme falls back to the IP-only key,
+        // matching the original switch's default branch.
+        return new IPHostId(ip);
+    }
+
+}
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.io.Serializable;
+
+/**
+ * Marker interface implemented by the hosttracker host-key classes
+ * (IP-only and IP+MAC keys). Extends Serializable so that keys can be
+ * stored in the clustered hosts caches.
+ *
+ * @author Deepak Udapudi
+ */
+
+public interface IHostId extends Serializable {
+
+}
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.io.Serializable;
+import java.net.InetAddress;
+
+/**
+ * IP-only host key implementation of the IHostId marker interface; used when
+ * the default "IP" key scheme is configured.
+ *
+ * @author Deepak Udapudi
+ */
+
+public class IPHostId implements IHostId, Serializable {
+    private static final long serialVersionUID = 1L;
+    // Sole key component; equals()/hashCode() are derived from it.
+    private InetAddress ipAddress;
+
+    public InetAddress getIpAddress() {
+        return ipAddress;
+    }
+
+    // NOTE(review): mutating the address changes equals()/hashCode(); do not
+    // call this while the instance is in use as a map/cache key.
+    public void setIpAddress(InetAddress ipAddress) {
+        this.ipAddress = ipAddress;
+    }
+
+    public IPHostId(InetAddress ipAddress) {
+        super();
+        this.ipAddress = ipAddress;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((ipAddress == null) ? 0 : ipAddress.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        IPHostId other = (IPHostId) obj;
+        if (ipAddress == null) {
+            if (other.ipAddress != null)
+                return false;
+        } else if (!ipAddress.equals(other.ipAddress))
+            return false;
+        return true;
+    }
+
+    // Convenience factory mirroring HostIdFactory's IP-only path.
+    public static IHostId fromIP(InetAddress addr) {
+        return new IPHostId(addr);
+    }
+
+}
--- /dev/null
+/*
+ * Copyright IBM Corporation, 2013. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.hosttracker;
+
+import java.io.Serializable;
+import java.net.InetAddress;
+
+/**
+ * IP + MAC host key implementation of the IHostId marker interface; used when
+ * the "IP+MAC" key scheme is configured.
+ *
+ * @author Deepak Udapudi
+ */
+
+// NOTE(review): this import sits below the class comment; consider grouping
+// it with the imports above.
+import org.opendaylight.controller.sal.packet.address.DataLinkAddress;
+
+public class IPMacHostId implements IHostId, Serializable {
+
+    private static final long serialVersionUID = 1L;
+    // Key components; equals()/hashCode() are derived from both.
+    private InetAddress ipAddress;
+    private DataLinkAddress macAddr;
+
+    public IPMacHostId(InetAddress ipAddress, DataLinkAddress macAddr) {
+        super();
+        this.ipAddress = ipAddress;
+        this.macAddr = macAddr;
+    }
+
+    public InetAddress getIpAddress() {
+        return ipAddress;
+    }
+
+    // NOTE(review): mutating either component changes equals()/hashCode();
+    // do not call the setters while the instance is in use as a map/cache key.
+    public void setIpAddress(InetAddress ipAddress) {
+        this.ipAddress = ipAddress;
+    }
+
+    public DataLinkAddress getMacAddr() {
+        return macAddr;
+    }
+
+    public void setMacAddr(DataLinkAddress macAddr) {
+        this.macAddr = macAddr;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((ipAddress == null) ? 0 : ipAddress.hashCode());
+        result = prime * result + ((macAddr == null) ? 0 : macAddr.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        IPMacHostId other = (IPMacHostId) obj;
+        if (ipAddress == null) {
+            if (other.ipAddress != null)
+                return false;
+        } else if (!ipAddress.equals(other.ipAddress))
+            return false;
+        if (macAddr == null) {
+            if (other.macAddr != null)
+                return false;
+        } else if (!macAddr.equals(other.macAddr))
+            return false;
+        return true;
+    }
+
+    // Convenience factory mirroring HostIdFactory's IP+MAC path.
+    public static IHostId fromIPAndMac(InetAddress ip, DataLinkAddress mac) {
+        return new IPMacHostId(ip, mac);
+    }
+
+}
* statically through Northbound APIs. If a binding is unknown, then an ARP
* request is initiated immediately to discover the host.
*
- * @param networkAddress
- * IP Address of the Host encapsulated in class InetAddress
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
* @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
* Class that contains the Host info such as its MAC address, Switch
* ID, port, VLAN. If Host is not found, returns NULL
*/
- public HostNodeConnector hostFind(InetAddress networkAddress);
+ public HostNodeConnector hostFind(IHostId id);
+
+ /**
+ * Applications call this interface methods to determine IP address to MAC
+ * binding and its connectivity to an OpenFlow switch in term of Node, Port,
+ * and VLAN. These bindings are learned dynamically as well as can be added
+ * statically through Northbound APIs. If a binding is unknown, then an ARP
+ * request is initiated immediately to discover the host.
+ *
+ * @param addr
+ * IP address of the host
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
+ */
+ public HostNodeConnector hostFind(InetAddress addr);
/**
* Checks the local Host Database to see if a Host has been learned for a
- * given IP address.
+ * given IP address and Mac combination using the HostId.
*
- * @param networkAddress
- * IP Address of the Host encapsulated in class InetAddress
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
+ * @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
+ * Class that contains the Host info such as its MAC address, Switch
+ * ID, port, VLAN. If Host is not found, returns NULL
+ *
+ */
+ public HostNodeConnector hostQuery(IHostId id);
+
+ /**
+ * Checks the local Host Database to see if a Host has been learned for a
+ * given IP address.
+ *
+ * @param addr
+ * IP address of the Host
* @return {@link org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector}
* Class that contains the Host info such as its MAC address, Switch
* ID, port, VLAN. If Host is not found, returns NULL
*
*/
- public HostNodeConnector hostQuery(InetAddress networkAddress);
+ public HostNodeConnector hostQuery(InetAddress addr);
/**
- * Initiates an immediate discovery of the Host for a given IP address. This
+ * Initiates an immediate discovery of the Host for a given Host id. This
* provides for the calling applications to block on the host discovery.
*
- * @param networkAddress
- * IP address encapsulated in InetAddress class
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
+ * @return Future
+ * {@link org.opendaylight.controller.hosttracker.HostTrackerCallable}
+ */
+ public Future<HostNodeConnector> discoverHost(IHostId id);
+
+ /**
+ * Initiates an immediate discovery of the Host for a given Host id. This
+ * provides for the calling applications to block on the host discovery.
+ *
+ * @param addr
+ * IP address of the host
* @return Future
* {@link org.opendaylight.controller.hosttracker.HostTrackerCallable}
*/
- public Future<HostNodeConnector> discoverHost(InetAddress networkAddress);
+ public Future<HostNodeConnector> discoverHost(InetAddress addr);
/**
* Returns the Network Hierarchy for a given Host. This API is typically
* used by applications like Hadoop for Rack Awareness functionality.
*
- * @param IP
- * address of the Host encapsulated in InetAddress class
+ * @param id
+ * IP address and Mac Address combination encapsulated in IHostId
+ * interface
* @return List of String ArrayList containing the Hierarchies.
*/
- public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress);
+ public List<List<String>> getHostNetworkHierarchy(IHostId id);
+
+ /**
+ * Returns the Network Hierarchy for a given Host. This API is typically
+ * used by applications like Hadoop for Rack Awareness functionality.
+ *
+ * @param addr
+ * IP address of the host
+ * @return List of String ArrayList containing the Hierarchies.
+ */
+ public List<List<String>> getHostNetworkHierarchy(InetAddress addr);
/**
* Returns all the the Hosts either learned dynamically or added statically
* @return The status object as described in {@code Status} indicating the
* result of this action.
*/
- public Status addStaticHost(String networkAddress, String dataLayerAddress,
- NodeConnector nc, String vlan);
+ public Status addStaticHost(String networkAddress, String dataLayerAddress, NodeConnector nc, String vlan);
/**
* Allows the deletion of statically learned Host
* result of this action.
*/
public Status removeStaticHost(String networkAddress);
+
+ /**
+ * Allows the deletion of statically learned Host
+ *
+ * @param networkAddress
+ * @param macAddress
+ * @return The status object as described in {@code Status} indicating the
+ * result of this action.
+ */
+ public Status removeStaticHostUsingIPAndMac(String networkAddress, String macAddress);
}
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
+import org.opendaylight.controller.hosttracker.IPHostId;
+import org.opendaylight.controller.hosttracker.IPMacHostId;
import org.opendaylight.controller.hosttracker.IfHostListener;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.IfNewHostNotify;
* removed the database
*/
+/***
+ *
+ * HostTracker db key scheme implementation support. Support has been added for
+ * IP only or IP + MAC scheme as of now. User can use either of the schemes
+ * based on the configuration done in config.ini file. By default the IP only
+ * key scheme is chosen. The attribute to be set in config.ini is
+ * hosttracker.keyscheme. It can currently have a value of "IP" or "IP+MAC":
+ * "IP" selects the IP only scheme, "IP+MAC" selects the IP + MAC scheme.
+ *
+ *
+ */
+
public class HostTracker implements IfIptoHost, IfHostListener, ISwitchManagerAware, IInventoryListener,
- ITopologyManagerAware, ICacheUpdateAware<InetAddress, HostNodeConnector>, CommandProvider {
+ ITopologyManagerAware, ICacheUpdateAware<IHostId, HostNodeConnector>, CommandProvider {
static final String ACTIVE_HOST_CACHE = "hosttracker.ActiveHosts";
static final String INACTIVE_HOST_CACHE = "hosttracker.InactiveHosts";
private static final Logger logger = LoggerFactory.getLogger(HostTracker.class);
protected final Set<IHostFinder> hostFinder = new CopyOnWriteArraySet<IHostFinder>();;
- protected ConcurrentMap<InetAddress, HostNodeConnector> hostsDB;
+ protected ConcurrentMap<IHostId, HostNodeConnector> hostsDB;
/*
* Following is a list of hosts which have been requested by NB APIs to be
* added, but either the switch or the port is not sup, so they will be
protected boolean stopping;
private static boolean hostRefresh = true;
private static int hostRetryCount = 5;
+ private String keyScheme = null;
+
private static class ARPPending {
- protected InetAddress hostIP;
+ protected IHostId hostId;
protected short sent_count;
protected HostTrackerCallable hostTrackerCallable;
- public InetAddress getHostIP() {
- return hostIP;
+ public IHostId getHostId() {
+ return hostId;
}
public short getSent_count() {
return hostTrackerCallable;
}
- public void setHostIP(InetAddress networkAddr) {
- this.hostIP = networkAddr;
+ public void setHostId(IHostId id) {
+ this.hostId = id;
}
public void setSent_count(short count) {
// This list contains the hosts for which ARP requests are being sent
// periodically
- ConcurrentMap<InetAddress, ARPPending> ARPPendingList;
+ ConcurrentMap<IHostId, ARPPending> ARPPendingList;
/*
* This list below contains the hosts which were initially in ARPPendingList
* above, but ARP response didn't come from there hosts after multiple
*
* We can't recover from condition 3 above
*/
- ConcurrentMap<InetAddress, ARPPending> failedARPReqList;
+ ConcurrentMap<IHostId, ARPPending> failedARPReqList;
public HostTracker() {
}
/* ARP Refresh Timer to go off every 5 seconds to implement ARP aging */
arpRefreshTimer = new Timer();
arpRefreshTimer.schedule(new ARPRefreshHandler(), 5000, 5000);
+ keyScheme = HostIdFactory.getScheme();
logger.debug("startUp: Caches created, timers started");
}
return;
}
logger.debug("Retrieving cache for HostTrackerAH");
- hostsDB = (ConcurrentMap<InetAddress, HostNodeConnector>) this.clusterContainerService
- .getCache(ACTIVE_HOST_CACHE);
+ hostsDB = (ConcurrentMap<IHostId, HostNodeConnector>) this.clusterContainerService.getCache(ACTIVE_HOST_CACHE);
if (hostsDB == null) {
logger.error("Cache couldn't be retrieved for HostTracker");
}
}
public void nonClusterObjectCreate() {
- hostsDB = new ConcurrentHashMap<InetAddress, HostNodeConnector>();
+ hostsDB = new ConcurrentHashMap<IHostId, HostNodeConnector>();
inactiveStaticHosts = new ConcurrentHashMap<NodeConnector, HostNodeConnector>();
- ARPPendingList = new ConcurrentHashMap<InetAddress, ARPPending>();
- failedARPReqList = new ConcurrentHashMap<InetAddress, ARPPending>();
+ ARPPendingList = new ConcurrentHashMap<IHostId, ARPPending>();
+ failedARPReqList = new ConcurrentHashMap<IHostId, ARPPending>();
}
public void shutDown() {
}
private boolean hostExists(HostNodeConnector host) {
- HostNodeConnector lhost = hostsDB.get(host.getNetworkAddress());
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ HostNodeConnector lhost = hostsDB.get(id);
return host.equals(lhost);
}
- private HostNodeConnector getHostFromOnActiveDB(InetAddress networkAddress) {
- return hostsDB.get(networkAddress);
+ private HostNodeConnector getHostFromOnActiveDB(IHostId id) {
+ return hostsDB.get(id);
}
- private Entry<NodeConnector, HostNodeConnector> getHostFromInactiveDB(InetAddress networkAddress) {
+ private Entry<NodeConnector, HostNodeConnector> getHostFromInactiveDB(IHostId id) {
for (Entry<NodeConnector, HostNodeConnector> entry : inactiveStaticHosts.entrySet()) {
- if (entry.getValue().equalsByIP(networkAddress)) {
- logger.debug("getHostFromInactiveDB(): Inactive Host found for IP:{} ", networkAddress.getHostAddress());
+ HostNodeConnector hnc = entry.getValue();
+ IHostId cmpId = HostIdFactory.create(hnc.getNetworkAddress(), hnc.getDataLayerAddress());
+ if (cmpId.equals(id)) {
+ logger.debug("getHostFromInactiveDB(): Inactive Host found for ID:{} ", decodeIPFromId(id));
return entry;
}
}
- logger.debug("getHostFromInactiveDB() Inactive Host Not found for IP: {}", networkAddress.getHostAddress());
+ logger.debug("getHostFromInactiveDB() Inactive Host Not found for ID: {}", decodeIPFromId(id));
return null;
}
- private void removeHostFromInactiveDB(InetAddress networkAddress) {
+ private void removeHostFromInactiveDB(IHostId id) {
NodeConnector nodeConnector = null;
for (Entry<NodeConnector, HostNodeConnector> entry : inactiveStaticHosts.entrySet()) {
- if (entry.getValue().equalsByIP(networkAddress)) {
+ HostNodeConnector hnc = entry.getValue();
+ IHostId cmpId = HostIdFactory.create(hnc.getNetworkAddress(), hnc.getDataLayerAddress());
+ if (cmpId.equals(id)) {
nodeConnector = entry.getKey();
break;
}
}
if (nodeConnector != null) {
inactiveStaticHosts.remove(nodeConnector);
- logger.debug("removeHostFromInactiveDB(): Host Removed for IP: {}", networkAddress.getHostAddress());
+ logger.debug("removeHostFromInactiveDB(): Host Removed for IP: {}", decodeIPFromId(id));
return;
}
- logger.debug("removeHostFromInactiveDB(): Host Not found for IP: {}", networkAddress.getHostAddress());
+ logger.debug("removeHostFromInactiveDB(): Host Not found for IP: {}", decodeIPFromId(id));
}
protected boolean hostMoved(HostNodeConnector host) {
- if (hostQuery(host.getNetworkAddress()) != null) {
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ if (hostQuery(id) != null) {
return true;
}
return false;
}
@Override
- public HostNodeConnector hostQuery(InetAddress networkAddress) {
- return hostsDB.get(networkAddress);
+ public HostNodeConnector hostQuery(IHostId id) {
+ return hostsDB.get(id);
}
@Override
- public Future<HostNodeConnector> discoverHost(InetAddress networkAddress) {
+ public Future<HostNodeConnector> discoverHost(IHostId id) {
if (executor == null) {
logger.debug("discoverHost: Null executor");
return null;
}
- Callable<HostNodeConnector> worker = new HostTrackerCallable(this, networkAddress);
+ Callable<HostNodeConnector> worker = new HostTrackerCallable(this, id);
Future<HostNodeConnector> submit = executor.submit(worker);
return submit;
}
@Override
- public HostNodeConnector hostFind(InetAddress networkAddress) {
+ public HostNodeConnector hostFind(IHostId id) {
/*
* Sometimes at boot with containers configured in the startup we hit
* this path (from TIF) when hostFinder has not been set yet Caller
return null;
}
- HostNodeConnector host = hostQuery(networkAddress);
+ HostNodeConnector host = hostQuery(id);
if (host != null) {
- logger.debug("hostFind(): Host found for IP: {}", networkAddress.getHostAddress());
+ logger.debug("hostFind(): Host found for IP: {}", id);
return host;
}
/* Add this host to ARPPending List for any potential retries */
- addToARPPendingList(networkAddress);
- logger.debug("hostFind(): Host Not Found for IP: {}, Inititated Host Discovery ...",
- networkAddress.getHostAddress());
+ addToARPPendingList(id);
+ logger.debug("hostFind(): Host Not Found for IP: {}, Initiated Host Discovery ...", id);
/* host is not found, initiate a discovery */
for (IHostFinder hf : hostFinder) {
- hf.find(networkAddress);
+ InetAddress addr = decodeIPFromId(id);
+ hf.find(addr);
}
return null;
}
@Override
public Set<HostNodeConnector> getActiveStaticHosts() {
Set<HostNodeConnector> list = new HashSet<HostNodeConnector>();
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.isStaticHost()) {
list.add(host);
return list;
}
- private void addToARPPendingList(InetAddress networkAddr) {
+ private void addToARPPendingList(IHostId id) {
ARPPending arphost = new ARPPending();
- arphost.setHostIP(networkAddr);
+ arphost.setHostId(id);
arphost.setSent_count((short) 1);
- ARPPendingList.put(networkAddr, arphost);
- logger.debug("Host Added to ARPPending List, IP: {}", networkAddr);
+ ARPPendingList.put(id, arphost);
+ logger.debug("Host Added to ARPPending List, IP: {}", decodeIPFromId(id));
+
}
- public void setCallableOnPendingARP(InetAddress networkAddr, HostTrackerCallable callable) {
+ public void setCallableOnPendingARP(IHostId id, HostTrackerCallable callable) {
ARPPending arphost;
- for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
- if (arphost.getHostIP().equals(networkAddr)) {
+ if (arphost.getHostId().equals(id)) {
arphost.setHostTrackerCallable(callable);
}
}
}
- private void processPendingARPReqs(InetAddress networkAddr) {
+ private void processPendingARPReqs(IHostId id) {
ARPPending arphost;
- if ((arphost = ARPPendingList.remove(networkAddr)) != null) {
+ if ((arphost = ARPPendingList.remove(id)) != null) {
// Remove the arphost from ARPPendingList as it has been learned now
- logger.debug("Host Removed from ARPPending List, IP: {}", networkAddr);
+ logger.debug("Host Removed from ARPPending List, IP: {}", id);
HostTrackerCallable htCallable = arphost.getHostTrackerCallable();
if (htCallable != null) {
htCallable.wakeup();
* It could have been a host from the FailedARPReqList
*/
- if (failedARPReqList.containsKey(networkAddr)) {
- failedARPReqList.remove(networkAddr);
- logger.debug("Host Removed from FailedARPReqList List, IP: {}", networkAddr);
+ if (failedARPReqList.containsKey(id)) {
+ failedARPReqList.remove(id);
+ logger.debug("Host Removed from FailedARPReqList List, IP: {}", decodeIPFromId(id));
}
}
// Learn a new Host
private void learnNewHost(HostNodeConnector host) {
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
host.initArpSendCountDown();
- HostNodeConnector rHost = hostsDB.putIfAbsent(host.getNetworkAddress(), host);
+ HostNodeConnector rHost = hostsDB.putIfAbsent(id, host);
if (rHost != null) {
// Another host is already learned for this IP address, replace it
- replaceHost(host.getNetworkAddress(), rHost, host);
+ replaceHost(id, rHost, host);
} else {
logger.debug("New Host Learned: MAC: {} IP: {}", HexEncode.bytesToHexString(host
.getDataLayerAddressBytes()), host.getNetworkAddress().getHostAddress());
}
}
- private void replaceHost(InetAddress networkAddr, HostNodeConnector removedHost, HostNodeConnector newHost) {
+ private void replaceHost(IHostId id, HostNodeConnector removedHost, HostNodeConnector newHost) {
// Ignore ARP messages from internal nodes
NodeConnector newHostNc = newHost.getnodeConnector();
boolean newHostIsInternal = topologyManager.isInternal(newHostNc);
newHost.initArpSendCountDown();
- if (hostsDB.replace(networkAddr, removedHost, newHost)) {
+ if (hostsDB.replace(id, removedHost, newHost)) {
logger.debug("Host move occurred: Old Host IP:{}, New Host IP: {}", removedHost.getNetworkAddress()
.getHostAddress(), newHost.getNetworkAddress().getHostAddress());
logger.debug("Old Host MAC: {}, New Host MAC: {}",
/*
* Host replacement has failed, do the recovery
*/
- hostsDB.put(networkAddr, newHost);
+ hostsDB.put(id, newHost);
logger.error("Host replacement failed. Overwrite the host. Repalced Host: {}, New Host: {}", removedHost,
newHost);
}
notifyHostLearnedOrRemoved(removedHost, false);
notifyHostLearnedOrRemoved(newHost, true);
if (!newHost.isStaticHost()) {
- processPendingARPReqs(networkAddr);
+ processPendingARPReqs(id);
}
}
// Remove known Host
- private void removeKnownHost(InetAddress key) {
+ private void removeKnownHost(IHostId key) {
HostNodeConnector host = hostsDB.get(key);
if (host != null) {
logger.debug("Removing Host: IP:{}", host.getNetworkAddress().getHostAddress());
hostsDB.remove(key);
} else {
- logger.error("removeKnownHost(): Host for IP address {} not found in hostsDB", key.getHostAddress());
+ logger.error("removeKnownHost(): Host for IP address {} not found in hostsDB", decodeIPFromId(key));
}
}
public void run() {
HostNodeConnector removedHost = null;
InetAddress networkAddr = host.getNetworkAddress();
-
+ IHostId id = HostIdFactory.create(networkAddr, host.getDataLayerAddress());
/* Check for Host Move case */
if (hostMoved(host)) {
/*
* location parameters with new information, and notify the
* applications listening to host move.
*/
- removedHost = hostsDB.get(networkAddr);
+
+ removedHost = hostsDB.get(id);
if (removedHost != null) {
- replaceHost(networkAddr, removedHost, host);
+ replaceHost(id, removedHost, host);
return;
} else {
logger.error("Host to be removed not found in hostsDB");
learnNewHost(host);
/* check if there is an outstanding request for this host */
- processPendingARPReqs(networkAddr);
+ processPendingARPReqs(id);
notifyHostLearnedOrRemoved(host, true);
}
}
logger.debug("Received for Host: IP {}, MAC {}, {}", host.getNetworkAddress().getHostAddress(),
HexEncode.bytesToHexString(host.getDataLayerAddressBytes()), host);
if (hostExists(host)) {
- HostNodeConnector existinghost = hostsDB.get(host.getNetworkAddress());
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ HostNodeConnector existinghost = hostsDB.get(id);
existinghost.initArpSendCountDown();
// Update the host
- hostsDB.put(host.getNetworkAddress(), existinghost);
+
+ hostsDB.put(id, existinghost);
+ logger.debug("hostListener returned without adding the host");
return;
}
new NotifyHostThread(host).start();
* Switch-Ids as String).
*/
@Override
- public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress) {
- HostNodeConnector host = hostQuery(hostAddress);
+ public List<List<String>> getHostNetworkHierarchy(IHostId id) {
+ HostNodeConnector host = hostQuery(id);
if (host == null) {
return null;
}
public void subnetNotify(Subnet sub, boolean add) {
logger.debug("Received subnet notification: {} add={}", sub, add);
if (add) {
- for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : failedARPReqList.entrySet()) {
ARPPending arphost;
arphost = entry.getValue();
if (hostFinder == null) {
logger.warn("ARPHandler Services are not available on subnet addition");
continue;
}
- logger.debug("Sending the ARP from FailedARPReqList fors IP: {}", arphost.getHostIP().getHostAddress());
+ logger.debug("Sending the ARP from FailedARPReqList for IP: {}", decodeIPFromId(arphost.getHostId()));
for (IHostFinder hf : hostFinder) {
- hf.find(arphost.getHostIP());
+ hf.find(decodeIPFromId(arphost.getHostId()));
}
}
}
/* This routine runs every 4 seconds */
logger.trace("Number of Entries in ARP Pending/Failed Lists: ARPPendingList = {}, failedARPReqList = {}",
ARPPendingList.size(), failedARPReqList.size());
- for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
- if (hostsDB.containsKey(arphost.getHostIP())) {
+ if (hostsDB.containsKey(arphost.getHostId())) {
// this host is already learned, shouldn't be in
// ARPPendingList
// Remove it and continue
- logger.warn("Learned Host {} found in ARPPendingList", arphost.getHostIP());
+ logger.warn("Learned Host {} found in ARPPendingList", decodeIPFromId(arphost.getHostId()));
ARPPendingList.remove(entry.getKey());
continue;
}
continue;
}
for (IHostFinder hf : hostFinder) {
- hf.find(arphost.getHostIP());
+ hf.find(decodeIPFromId(arphost.getHostId()));
}
arphost.sent_count++;
- logger.debug("ARP Sent from ARPPending List, IP: {}", arphost.getHostIP().getHostAddress());
+ logger.debug("ARP Sent from ARPPending List, IP: {}", decodeIPFromId(arphost.getHostId()));
} else if (arphost.getSent_count() >= hostRetryCount) {
/*
* ARP requests have been sent without receiving a reply,
*/
ARPPendingList.remove(entry.getKey());
logger.debug("ARP reply not received after multiple attempts, removing from Pending List IP: {}",
- arphost.getHostIP().getHostAddress());
+ decodeIPFromId(arphost.getHostId()));
/*
* Add this host to a different list which will be processed
* on link up events
*/
- logger.debug("Adding the host to FailedARPReqList IP: {}", arphost.getHostIP().getHostAddress());
+ logger.debug("Adding the host to FailedARPReqList IP: {}", decodeIPFromId(arphost.getHostId()));
failedARPReqList.put(entry.getKey(), arphost);
} else {
logger.error("ARPRefreshHandler(): hostsDB is not allocated yet:");
return;
}
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.isStaticHost()) {
/* this host was learned via API3, don't age it out */
HostNodeConnector host = null;
try {
host = new HostNodeConnector(dataLayerAddress, networkAddr, nc, vlan);
+ IHostId id = HostIdFactory.create(networkAddr, new EthernetAddress(dataLayerAddress));
if (hostExists(host)) {
// This host is already learned either via ARP or through a
// northbound request
return new Status(StatusCode.SUCCESS);
}
- if (hostsDB.get(networkAddr) != null) {
+ if (hostsDB.get(id) != null) {
// There is already a host with this IP address (but behind
// a different (switch, port, vlan) tuple. Return an error
return new Status(StatusCode.CONFLICT, "Host with this IP already exists.");
*/
if (switchManager.isNodeConnectorEnabled(nc)) {
learnNewHost(host);
- processPendingARPReqs(networkAddr);
+ processPendingARPReqs(id);
notifyHostLearnedOrRemoved(host, true);
} else {
inactiveStaticHosts.put(nc, host);
return new Status(StatusCode.BADREQUEST, "Host already exists");
}
+ IHostId id = HostIdFactory.create(networkAddr, new EthernetAddress(dataLayerAddress));
+
- if ((tobeUpdatedHost = hostsDB.get(networkAddr)) != null) {
+ if ((tobeUpdatedHost = hostsDB.get(id)) != null) {
- if (hostsDB.replace(networkAddr, tobeUpdatedHost, host)) {
+ if (hostsDB.replace(id, tobeUpdatedHost, host)) {
logger.debug("Host replaced from hostsDB. Old host: {} New Host: {}", tobeUpdatedHost, host);
notifyHostLearnedOrRemoved(tobeUpdatedHost, false);
notifyHostLearnedOrRemoved(host, true);
* otherwise
*/
- public Status removeStaticHostReq(InetAddress networkAddress) {
+ public Status removeStaticHostReq(InetAddress networkAddress, DataLinkAddress mac) {
// Check if host is in active hosts database
- HostNodeConnector host = getHostFromOnActiveDB(networkAddress);
+ IHostId id = HostIdFactory.create(networkAddress, mac);
+ HostNodeConnector host = getHostFromOnActiveDB(id);
if (host != null) {
// Validation check
if (!host.isStaticHost()) {
}
// Remove and notify
notifyHostLearnedOrRemoved(host, false);
- removeKnownHost(networkAddress);
+ removeKnownHost(id);
return new Status(StatusCode.SUCCESS, null);
}
// Check if host is in inactive hosts database
- Entry<NodeConnector, HostNodeConnector> entry = getHostFromInactiveDB(networkAddress);
+ Entry<NodeConnector, HostNodeConnector> entry = getHostFromInactiveDB(id);
if (entry != null) {
host = entry.getValue();
// Validation check
if (!host.isStaticHost()) {
return new Status(StatusCode.FORBIDDEN, "Host " + networkAddress.getHostName() + " is not static");
}
- this.removeHostFromInactiveDB(networkAddress);
+ this.removeHostFromInactiveDB(id);
return new Status(StatusCode.SUCCESS, null);
}
switch (type) {
case REMOVED:
logger.debug("Received removed node {}", node);
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.getnodeconnectorNode().equals(node)) {
logger.debug("Node: {} is down, remove from Hosts_DB", node);
@Override
public Status removeStaticHost(String networkAddress) {
try {
+ if ((keyScheme != null) && (!keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ return new Status(StatusCode.NOTALLOWED, "Host DB Key scheme used is not IP only scheme.");
+ }
InetAddress address = InetAddress.getByName(networkAddress);
- return removeStaticHostReq(address);
+ return removeStaticHostReq(address, null);
} catch (UnknownHostException e) {
logger.debug("Invalid IP Address when trying to remove host", e);
return new Status(StatusCode.BADREQUEST, "Invalid IP Address when trying to remove host");
}
}
+ @Override
+ public Status removeStaticHostUsingIPAndMac(String networkAddress, String macAddress) {
+ try {
+ if ((keyScheme != null) && (keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ return new Status(StatusCode.NOTALLOWED, "Host DB Key scheme used is IP only scheme.");
+ }
+ InetAddress address = InetAddress.getByName(networkAddress);
+ DataLinkAddress mac = new EthernetAddress(HexEncode.bytesFromHexString(macAddress));
+ return removeStaticHostReq(address, mac);
+ } catch (UnknownHostException e) {
+ logger.debug("Invalid IP Address when trying to remove host", e);
+ return new Status(StatusCode.BADREQUEST, "Invalid IP Address when trying to remove host");
+ } catch (ConstructionException e) {
+ logger.debug("Invalid MAC Address when trying to remove host", e);
+ return new Status(StatusCode.BADREQUEST, "Invalid Input parameters have been passed.");
+ }
+ }
+
+ private InetAddress decodeIPFromId(IHostId id) {
+ if ((keyScheme != null) && (keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ IPHostId ipId = (IPHostId) id;
+ return (ipId.getIpAddress());
+ } else if ((keyScheme != null) && (keyScheme.equals(HostIdFactory.IP_MAC_KEY_SCHEME))) {
+ IPMacHostId ipMacId = (IPMacHostId) id;
+ return (ipMacId.getIpAddress());
+ }
+ return null;
+ }
+
+ private DataLinkAddress decodeMacFromId(IHostId id) {
+ if ((keyScheme != null) && (!keyScheme.equals(HostIdFactory.DEFAULT_IP_KEY_SCHEME))) {
+ IPMacHostId ipMacId = (IPMacHostId) id;
+ return (ipMacId.getMacAddr());
+ }
+
+ return null;
+ }
+
private void handleNodeConnectorStatusUp(NodeConnector nodeConnector) {
ARPPending arphost;
HostNodeConnector host = null;
logger.trace("handleNodeConnectorStatusUp {}", nodeConnector);
- for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : failedARPReqList.entrySet()) {
arphost = entry.getValue();
- logger.trace("Sending the ARP from FailedARPReqList fors IP: {}", arphost.getHostIP().getHostAddress());
+ logger.trace("Sending the ARP from FailedARPReqList for IP: {}", arphost.getHostId());
if (hostFinder == null) {
logger.warn("ARPHandler is not available at interface up");
logger.warn("Since this event is missed, host(s) connected to interface {} may not be discovered",
// Use hostFinder's "probe" method
try {
byte[] dataLayerAddress = NetUtils.getBroadcastMACAddr();
- host = new HostNodeConnector(dataLayerAddress, arphost.getHostIP(), nodeConnector, (short) 0);
+ host = new HostNodeConnector(dataLayerAddress, decodeIPFromId(arphost.getHostId()), nodeConnector,
+ (short) 0);
for (IHostFinder hf : hostFinder) {
hf.probe(host);
}
} catch (ConstructionException e) {
logger.debug("HostNodeConnector couldn't be created for Host: {}, NodeConnector: {}",
- arphost.getHostIP(), nodeConnector);
+ arphost.getHostId(), nodeConnector);
logger.error("", e);
}
}
if (host != null) {
inactiveStaticHosts.remove(nodeConnector);
learnNewHost(host);
- processPendingARPReqs(host.getNetworkAddress());
+ IHostId id = HostIdFactory.create(host.getNetworkAddress(), host.getDataLayerAddress());
+ processPendingARPReqs(id);
notifyHostLearnedOrRemoved(host, true);
}
}
private void handleNodeConnectorStatusDown(NodeConnector nodeConnector) {
logger.trace("handleNodeConnectorStatusDown {}", nodeConnector);
- for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
+ for (Entry<IHostId, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
if (host.getnodeConnector().equals(nodeConnector)) {
logger.debug(" NodeConnector: {} is down, remove from Hosts_DB", nodeConnector);
this.containerName = "";
}
startUp();
+
+ logger.debug("key Scheme in hosttracker is {}", keyScheme);
}
/**
}
@Override
- public void entryCreated(InetAddress key, String cacheName, boolean originLocal) {
+ public void entryCreated(IHostId key, String cacheName, boolean originLocal) {
if (originLocal) {
return;
}
}
@Override
- public void entryUpdated(InetAddress key, HostNodeConnector new_value, String cacheName, boolean originLocal) {
+ public void entryUpdated(IHostId key, HostNodeConnector new_value, String cacheName, boolean originLocal) {
}
@Override
- public void entryDeleted(InetAddress key, String cacheName, boolean originLocal) {
+ public void entryDeleted(IHostId key, String cacheName, boolean originLocal) {
}
private void registerWithOSGIConsole() {
public void _dumpPendingARPReqList(CommandInterpreter ci) {
ARPPending arphost;
- for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
- ci.println(arphost.getHostIP().toString());
+ ci.println(arphost.getHostId().toString());
}
}
public void _dumpFailedARPReqList(CommandInterpreter ci) {
ARPPending arphost;
- for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<IHostId, ARPPending> entry : failedARPReqList.entrySet()) {
arphost = entry.getValue();
- ci.println(arphost.getHostIP().toString());
+ ci.println(arphost.getHostId().toString());
}
}
+
+ @Override
+ public HostNodeConnector hostFind(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return (hostFind(id));
+ }
+
+ @Override
+ public HostNodeConnector hostQuery(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return (hostQuery(id));
+ }
+
+ @Override
+ public Future<HostNodeConnector> discoverHost(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return discoverHost(id);
+ }
+
+ @Override
+ public List<List<String>> getHostNetworkHierarchy(InetAddress addr) {
+ IHostId id = HostIdFactory.create(addr, null);
+ return getHostNetworkHierarchy(id);
+ }
}
* find a host in HostTracker's database and want to discover the host
* in the same thread without being called by a callback function.
*/
-import java.net.InetAddress;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
+/**
+ * Callable that allows a caller to request discovery of a host, identified
+ * by an {@link IHostId}, and synchronously wait for the result on a worker
+ * thread without relying on a callback.
+ */
public class HostTrackerCallable implements Callable<HostNodeConnector> {
- InetAddress trackedHost;
+ // Host id, which could be the IP or a combination of IP + MAC based on the key scheme chosen.
+ IHostId trackedHost;
HostTracker hostTracker;
protected CountDownLatch latch;
- public HostTrackerCallable(HostTracker tracker, InetAddress inet) {
+ public HostTrackerCallable(HostTracker tracker, IHostId inet) {
trackedHost = inet;
hostTracker = tracker;
latch = new CountDownLatch(1);
\r
import java.net.InetAddress;\r
import java.net.UnknownHostException;\r
+\r
import junit.framework.TestCase;\r
\r
import org.junit.Assert;\r
import org.junit.Test;\r
+import org.opendaylight.controller.hosttracker.IHostId;\r
+import org.opendaylight.controller.hosttracker.IPHostId;\r
\r
public class HostTrackerTest extends TestCase {\r
\r
Assert.assertFalse(hostTracker == null);\r
\r
InetAddress hostIP = InetAddress.getByName("192.168.0.8");\r
+ IHostId id = IPHostId.fromIP(hostIP);\r
\r
HostTrackerCallable htCallable = new HostTrackerCallable(hostTracker,\r
- hostIP);\r
- Assert.assertTrue(htCallable.trackedHost.equals(hostIP));\r
+ id);\r
+ Assert.assertTrue(htCallable.trackedHost.equals(id));\r
Assert.assertTrue(htCallable.hostTracker.equals(hostTracker));\r
\r
long count = htCallable.latch.getCount();\r
Assert.assertFalse(hostTracker == null);\r
\r
InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
+ IHostId id1 = IPHostId.fromIP(hostIP_1);\r
InetAddress hostIP_2 = InetAddress.getByName("192.168.0.18");\r
- hostTracker.discoverHost(hostIP_1);\r
- hostTracker.discoverHost(hostIP_2);\r
+ IHostId id2 = IPHostId.fromIP(hostIP_2);\r
+ hostTracker.discoverHost(id1);\r
+ hostTracker.discoverHost(id2);\r
hostTracker.nonClusterObjectCreate();\r
}\r
\r
</scm>
<artifactId>hosttracker.integrationtest</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>arphandler</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${arphandler.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
+import org.opendaylight.controller.hosttracker.IHostId;
+import org.opendaylight.controller.hosttracker.IPHostId;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
import org.opendaylight.controller.sal.core.Node;
import org.osgi.framework.ServiceReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
//import org.opendaylight.controller.hosttracker.*;
@RunWith(PaxExam.class)
private IfIptoHost hosttracker = null;
private IInventoryListener invtoryListener = null;
+
// Configure the OSGi container
@Configuration
public Option[] config() {
st = this.hosttracker.addStaticHost("192.168.0.13", "11:22:33:44:55:77", nc1_2, "0");
Assert.assertFalse(st.isSuccess());
-
this.invtoryListener.notifyNodeConnector(nc1_1, UpdateType.ADDED, null);
// check all host list
Status st = this.hosttracker.addStaticHost("192.168.0.8", "11:22:33:44:55:66", nc1_1, null);
st = this.hosttracker.addStaticHost("192.168.0.13", "11:22:33:44:55:77", nc1_2, "");
- HostNodeConnector hnc_1 = this.hosttracker.hostFind(InetAddress.getByName("192.168.0.8"));
+ IHostId id1 = IPHostId.fromIP(InetAddress.getByName("192.168.0.8"));
+ HostNodeConnector hnc_1 = this.hosttracker.hostFind(id1);
assertNull(hnc_1);
this.invtoryListener.notifyNodeConnector(nc1_1, UpdateType.ADDED, null);
- hnc_1 = this.hosttracker.hostFind(InetAddress.getByName("192.168.0.8"));
+ IHostId id2 = IPHostId.fromIP(InetAddress.getByName("192.168.0.8"));
+ hnc_1 = this.hosttracker.hostFind(id2);
+
assertNotNull(hnc_1);
}
private IContainer container;
private static final String NAMEREGEX = "^[a-zA-Z0-9]+$";
private static ConcurrentMap<Integer, Flow> staticFlows;
- private static ConcurrentMap<Integer, Integer> staticFlowsOrdinal;
+ private static ConcurrentMap<Integer, Integer> staticFlowsOrdinal = new ConcurrentHashMap<Integer, Integer>();
/*
* Inactive flow list. This is for the global instance of FRM It will
* contain all the flow entries which were installed on the global container
private boolean inContainerMode; // being used by global instance only
public FlowConsumerImpl() {
- InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Flows.class).toInstance();
+ InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder(Flows.class).child(Flow.class)
+ .toInstance();
flowService = FRMConsumerImpl.getProviderSession().getRpcService(SalFlowService.class);
if (null == flowService) {
@Override
public void onNodeErrorNotification(NodeErrorNotification notification) {
// TODO Auto-generated method stub
-
+
}
@Override
- public void onNodeExperimenterErrorNotification(
- NodeExperimenterErrorNotification notification) {
+ public void onNodeExperimenterErrorNotification(NodeExperimenterErrorNotification notification) {
// TODO Auto-generated method stub
-
+
};
}
public GroupConsumerImpl() {
- InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Groups.class)
- .node(Group.class).toInstance();
+ InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder(Groups.class).child(Group.class)
+ .toInstance();
groupService = FRMConsumerImpl.getProviderSession().getRpcService(SalGroupService.class);
clusterGroupContainerService = FRMConsumerImpl.getClusterContainerService();
private IContainer container;
public MeterConsumerImpl() {
- InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Meters.class)
- .node(Meter.class).toInstance();
+ InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder(Meters.class).child(Meter.class)
+ .toInstance();
meterService = FRMConsumerImpl.getProviderSession().getRpcService(SalMeterService.class);
clusterMeterContainerService = FRMConsumerImpl.getClusterContainerService();
--- /dev/null
+package org.opendaylight.controller.forwardingrulesmanager.consumer.impl;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
+import org.opendaylight.controller.sal.common.util.Rpcs;
+import org.opendaylight.controller.sal.core.IContainer;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+import org.opendaylight.controller.sal.utils.Status;
+import org.opendaylight.controller.sal.utils.StatusCode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.config.rev131024.Tables;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.config.rev131024.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.SalTableService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.UpdateTableInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.table.update.UpdatedTableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TableFeaturesConsumerImpl {
+ protected static final Logger logger = LoggerFactory.getLogger(TableFeaturesConsumerImpl.class);
+ private SalTableService tableService;
+ private TableDataCommitHandler commitHandler;
+ private final IClusterContainerServices clusterContainerService = null;
+ private IContainer container;
+ private static final String NAMEREGEX = "^[a-zA-Z0-9]+$";
+ private boolean inContainerMode; // being used by global instance only
+
+    /**
+     * Wires this consumer into the FRM infrastructure: resolves the SAL table
+     * service, registers the commit handler for the Tables/Table subtree and
+     * looks up the container instance.
+     */
+    public TableFeaturesConsumerImpl() {
+        InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder(Tables.class).child(Table.class)
+                .toInstance();
+        tableService = FRMConsumerImpl.getProviderSession().getRpcService(SalTableService.class);
+
+        if (null == tableService) {
+            // Without the consumer SAL service we cannot push table updates;
+            // bail out and leave the commit handler unregistered.
+            logger.error("Consumer SAL Service is down or NULL. FRM may not function as intended");
+            return;
+        }
+
+        commitHandler = new TableDataCommitHandler();
+        FRMConsumerImpl.getDataProviderService().registerCommitHandler(path, commitHandler);
+        container = (IContainer) ServiceHelper.getGlobalInstance(IContainer.class, this);
+    }
+
+    /**
+     * Sends a TableFeatures update to the southbound plugin.
+     *
+     * @param path instance identifier of the table being updated (not read by
+     *            this method; kept for the commit-handler calling convention)
+     * @param dataObject the table features configuration to push
+     */
+    private void updateTableFeatures(InstanceIdentifier<?> path, TableFeatures dataObject) {
+
+        UpdateTableInputBuilder input = new UpdateTableInputBuilder();
+        UpdatedTableBuilder updatedtablebuilder = new UpdatedTableBuilder();
+        updatedtablebuilder.fieldsFrom(dataObject);
+        List<TableFeatures> features = updatedtablebuilder.build().getTableFeatures();
+        for (TableFeatures feature : features) {
+            // max-entries is treated as read-only here: any request that sets
+            // it is rejected outright rather than forwarded.
+            if (feature != null && feature.getMaxEntries() != null) {
+                logger.error("Max Entries field is read-only, cannot be changed");
+                return;
+            }
+        }
+        input.setUpdatedTable(updatedtablebuilder.build());
+
+        // Send the table feature update request to the southbound plugin.
+        tableService.updateTable(input.build());
+    }
+
+    /**
+     * Pushes every prepared TableFeatures update in the given transaction
+     * down to the southbound plugin.
+     *
+     * @param transaction internal transaction holding the update plan
+     */
+    private void commitToPlugin(internalTransaction transaction) {
+        // The entry IS used below; the previous @SuppressWarnings("unused")
+        // was spurious, and the System.out debug print is replaced by the
+        // class logger.
+        for (Entry<InstanceIdentifier<?>, TableFeatures> entry : transaction.updates.entrySet()) {
+            logger.debug("Committing table features update for {}", entry.getKey());
+            updateTableFeatures(entry.getKey(), entry.getValue());
+        }
+    }
+
+    private final class TableDataCommitHandler implements DataCommitHandler<InstanceIdentifier<?>, DataObject> {
+
+        /**
+         * Builds an internal transaction for the requested modification. The
+         * transaction validates and plans the updates here; execution is
+         * deferred to {@code internalTransaction.finish()}.
+         */
+        @SuppressWarnings("unchecked")
+        @Override
+        public DataCommitTransaction requestCommit(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+            internalTransaction transaction = new internalTransaction(modification);
+            transaction.prepareUpdate();
+            return transaction;
+        }
+    }
+
+ private final class internalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
+
+ private final DataModification<InstanceIdentifier<?>, DataObject> modification;
+
+ @Override
+ public DataModification<InstanceIdentifier<?>, DataObject> getModification() {
+ return modification;
+ }
+
+ public internalTransaction(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+ this.modification = modification;
+ }
+
+ Map<InstanceIdentifier<?>, TableFeatures> updates = new HashMap<>();
+
+        /**
+         * Builds the update plan: validates each changed TableFeatures entry
+         * and records it in {@code updates} for the later commit phase.
+         * Aborts planning on the first invalid entry.
+         */
+        void prepareUpdate() {
+
+            Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
+            for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
+
+                // Only TableFeatures entries are relevant; the modification
+                // map may carry other DataObject types.
+                if (entry.getValue() instanceof TableFeatures) {
+                    TableFeatures tablefeatures = (TableFeatures) entry.getValue();
+
+                    // Validate the DataObject itself. The previous code cast
+                    // the Map.Entry to TableFeatures, which always threw
+                    // ClassCastException before any validation could run.
+                    Status status = validate(container, tablefeatures);
+                    if (!status.isSuccess()) {
+                        logger.warn("Invalid Configuration for table features {}. The failure is {}", entry,
+                                status.getDescription());
+                        String error = "Invalid Configuration (" + status.getDescription() + ")";
+                        logger.error(error);
+                        return;
+                    }
+                    preparePutEntry(entry.getKey(), tablefeatures);
+                }
+            }
+        }
+
+        /**
+         * Records a single non-null table-features update in the transaction
+         * plan; null values are silently ignored.
+         */
+        private void preparePutEntry(InstanceIdentifier<?> key, TableFeatures tablefeatures) {
+            if (tablefeatures != null) {
+                logger.debug("Planning table features update for {}", key);
+                updates.put(key, tablefeatures);
+            }
+        }
+
+        /**
+         * Executes the prepared plan by pushing all planned updates to the
+         * southbound plugin. Always reports success for the internal
+         * transaction.
+         */
+        @Override
+        public RpcResult<Void> finish() throws IllegalStateException {
+
+            commitToPlugin(this);
+            // We return true if internal transaction is successful.
+            // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+            return Rpcs.getRpcResult(true, null, null);
+        }
+
+        /**
+         * Rolls back the preparation phase. This is a no-op because
+         * requestCommit/prepareUpdate only populated the local plan and did
+         * not modify any shared state.
+         */
+        @Override
+        public RpcResult<Void> rollback() throws IllegalStateException {
+            // NOOP - we did not modify any internal state during the
+            // requestCommit phase
+            // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+            return Rpcs.getRpcResult(true, null, null);
+
+        }
+
+        /**
+         * Validates a TableFeatures configuration.
+         *
+         * @param container owning container (currently not consulted)
+         * @param dataObject the table features to validate
+         * @return SUCCESS when the name passes all checks, BADREQUEST otherwise
+         */
+        public Status validate(IContainer container, TableFeatures dataObject) {
+
+            String tablename = dataObject.getName();
+            // NOTE(review): this accepts only alphanumeric names of exactly
+            // 32 characters — confirm the fixed-length requirement is
+            // intentional and not a leftover from a hash/ID check.
+            if (tablename == null || tablename.trim().isEmpty() || !tablename.matches(NAMEREGEX)
+                    || tablename.length() != 32) {
+                return new Status(StatusCode.BADREQUEST, "Invalid table name");
+            }
+
+            return new Status(StatusCode.SUCCESS);
+        }
+ }
+}
bit SEND_FLOW_REM;
}
}
+
+ typedef removed_reason_flags {
+ type bits {
+ bit IDLE_TIMEOUT;
+ bit HARD_TIMEOUT;
+ bit DELETE;
+ bit GROUP_DELETE;
+ }
+ }
grouping generic_flow_attributes {
leaf priority {
grouping flow-mod-removed {
uses generic_flow_attributes;
+ leaf removed_reason {
+ type removed_reason_flags;
+ }
+
leaf duration_nsec {
type uint32;
}
leaf duration_sec {
type uint32;
}
-
- leaf idle_timeout {
- type uint16;
- }
-
- leaf hard_timeout {
- type uint16;
- }
-
+
leaf packet_count {
type uint64;
}
-
+
leaf byte_count {
type uint64;
}
+
container match {
uses match:match;
}
leaf group-type {
type enumeration {
enum group-all;
- enum group_select;
- enum group_indirect;
- enum group_ff;
+ enum group-select;
+ enum group-indirect;
+ enum group-ff;
}
}
}
+ typedef group-capabilities {
+ type enumeration {
+ enum select-weight;
+ enum select-liveness;
+ enum chaining;
+ enum chaining-checks;
+ }
+ }
+
+
grouping group {
uses group-types;
}
grouping group-features {
- uses group-types;
+ leaf types {
+ type bits {
+ bit group-all;
+ bit group-select;
+ bit group-indirect;
+ bit group-ff;
+ }
+ }
leaf capabilities {
- type enumeration {
- enum select-weight;
- enum select-liveness;
- enum chaining;
- enum chaining-checks;
+ type bits {
+ bit select-weight;
+ bit select-liveness;
+ bit chaining;
+ bit chaining-checks;
}
}
--- /dev/null
+module opendaylight-queue-types {
+ namespace "urn:opendaylight:flow:types:queue";
+ prefix queue-types;
+
+ import ietf-inet-types {prefix inet; revision-date "2010-09-24";}
+ import ietf-yang-types {prefix yang; revision-date "2010-09-24";}
+
+ revision "2013-09-25" {
+ description "Initial revision of Queue Inventory model";
+ }
+
+ typedef queue-properties {
+ type enumeration {
+ enum min_rate;
+ enum max_rate;
+ }
+ }
+
+
+ grouping common-queue {
+
+ leaf property {
+ type uint16;
+ description "One of OFPQT_.";
+ }
+
+ }
+
+
+ grouping queue-prop-min-rate {
+
+ uses common-queue;
+
+ leaf rate {
+ type uint16;
+ description "OFPQT_MIN, len: 16";
+ }
+
+ }
+
+
+
+ grouping queue-prop-max-rate {
+
+ uses common-queue;
+
+ leaf rate {
+ type uint16;
+ description "OFPQT_MAX, len: 16";
+ }
+
+ }
+ grouping queue-packet {
+
+
+ leaf queue-id {
+ type uint32;
+ description "id for the specific queue.";
+ }
+
+ leaf port {
+ type uint32;
+ description "Port this queue is attached to.";
+ }
+ uses common-queue;
+ }
+
+    // Request parameters for an OFPT_QUEUE_GET_CONFIG_REQUEST message.
+    grouping queue-config-request
+    {
+        leaf port {
+            type uint32;
+            description "Port to be queried.";
+        }
+
+    }
+}
\ No newline at end of file
--- /dev/null
+module queue-management {
+ namespace "urn:opendaylight:queue:config";
+ prefix queue-cfg;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+
+ import opendaylight-queue-types {prefix queue; revision-date "2013-09-25";}
+
+
+ revision "2013-10-24" {
+ description "Initial revision of queue service";
+ }
+
+ grouping queue-entry {
+ leaf node {
+ type inv:node-connector-ref;
+
+ }
+ uses queue:queue-config-request;
+ }
+
+ container queues {
+ list queue {
+ key "id node";
+
+ leaf id {
+ type uint32;
+ }
+
+ uses queue-entry;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module sal-queue {
+ namespace "urn:opendaylight:queue:service";
+ prefix queue;
+
+ import yang-ext {prefix ext; revision-date "2013-07-09";}
+ import opendaylight-inventory {prefix inv; revision-date "2013-08-19";}
+ import opendaylight-queue-types {prefix queue-type; revision-date "2013-09-25";}
+
+ revision "2013-11-07" {
+ description "Initial revision of queue service";
+ }
+
+ grouping node-queue {
+ uses "inv:node-context-ref";
+
+ uses queue-type:queue-packet;
+ }
+
+
+ rpc get-queue {
+ output {
+ uses queue-type:queue-packet;
+ }
+ }
+
+ notification queue-get-config-reply {
+ uses node-queue;
+ }
+}
\ No newline at end of file
rpc get-group-statistics {
input {
uses inv:node;
+ leaf group-id{
+ type group-types:group-id;
+ }
+
}
output {
uses group-types:group-statistics-reply;
<build>
<plugins>
-
<plugin>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-maven-plugin</artifactId>
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-broker-impl
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:33:01 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+import org.opendaylight.controller.sal.binding.impl.BindingAwareBrokerImpl;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public final class BindingBrokerImplModule extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractBindingBrokerImplModule
+{
+
+ private BundleContext bundleContext;
+
+ public BindingBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public BindingBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, BindingBrokerImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ // Add custom validation for module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ BindingAwareBrokerImpl broker = new BindingAwareBrokerImpl(getBundleContext());
+ broker.setDataBroker(getDataBrokerDependency());
+ broker.setNotifyBroker(getNotificationServiceDependency());
+ broker.start();
+ return broker;
+ }
+
+ public BundleContext getBundleContext() {
+ return bundleContext;
+ }
+
+ public void setBundleContext(BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
+ }
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-broker-impl
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:33:01 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public class BindingBrokerImplModuleFactory extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractBindingBrokerImplModuleFactory
+{
+
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ BindingBrokerImplModule module = (BindingBrokerImplModule) super.createModule(instanceName, dependencyResolver, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver,
+ DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
+ BindingBrokerImplModule module = (BindingBrokerImplModule) super.createModule(instanceName, dependencyResolver, old, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.binding.impl;
-
-import org.opendaylight.controller.sal.binding.impl.BindingAwareBrokerImpl;
-import org.osgi.framework.BundleContext;
-
-import com.google.common.base.Preconditions;
-
-/**
-*
-*/
-public final class BindingBrokerImplSingletonModule extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractBindingBrokerImplSingletonModule
-{
-
- private BundleContext bundleContext;
-
- public BindingBrokerImplSingletonModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
- super(identifier, dependencyResolver);
- }
-
- public BindingBrokerImplSingletonModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, BindingBrokerImplSingletonModule oldModule, java.lang.AutoCloseable oldInstance) {
- super(identifier, dependencyResolver, oldModule, oldInstance);
- }
-
- @Override
- public void validate() {
- super.validate();
- Preconditions.checkNotNull(getBundleContext());
- }
-
-
- @Override
- public boolean canReuseInstance(AbstractBindingBrokerImplSingletonModule oldModule) {
- return true;
- }
-
-
- public java.lang.AutoCloseable createInstance() {
- BindingAwareBrokerImpl broker = new BindingAwareBrokerImpl(getBundleContext());
- broker.start();
- return broker;
- }
-
- public BundleContext getBundleContext() {
- return bundleContext;
- }
-
- public void setBundleContext(BundleContext bundleContext) {
- this.bundleContext = bundleContext;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.yang.md.sal.binding.impl;
-
-import org.opendaylight.controller.config.api.DependencyResolver;
-import org.opendaylight.controller.config.api.DependencyResolverFactory;
-import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
-import org.opendaylight.controller.config.api.ModuleIdentifier;
-import org.opendaylight.controller.config.spi.Module;
-import org.osgi.framework.BundleContext;
-
-import java.util.Collections;
-import java.util.Set;
-
-/**
-*
-*/
-public class BindingBrokerImplSingletonModuleFactory extends
- org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractBindingBrokerImplSingletonModuleFactory {
-
- private static final String SINGLETON_NAME = "binding-broker-singleton";
- public static ModuleIdentifier SINGLETON_IDENTIFIER = new ModuleIdentifier(NAME, SINGLETON_NAME);
-
- @Override
- public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
- throw new UnsupportedOperationException("Only default instance supported.");
- }
-
- @Override
- public Module createModule(String instanceName, DependencyResolver dependencyResolver,
- DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
- Module instance = super.createModule(instanceName, dependencyResolver, old, bundleContext);
- ((BindingBrokerImplSingletonModule)instance).setBundleContext(bundleContext);
- return instance;
- }
-
- @Override
- public Set<BindingBrokerImplSingletonModule> getDefaultModules(DependencyResolverFactory dependencyResolverFactory,
- BundleContext bundleContext) {
-
- DependencyResolver dependencyResolver = dependencyResolverFactory
- .createDependencyResolver(SINGLETON_IDENTIFIER);
- BindingBrokerImplSingletonModule instance = new BindingBrokerImplSingletonModule(SINGLETON_IDENTIFIER,
- dependencyResolver);
- instance.setBundleContext(bundleContext);
-
- return Collections.singleton(instance);
- }
-
-}
--- /dev/null
+/**
+ * Generated file
+
+ * Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-data-broker
+ * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ * Generated at: Wed Nov 20 17:33:01 CET 2013
+ *
+ * Do not modify this file unless it is present under src/main directory
+ */
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+
+import org.opendaylight.controller.md.sal.common.impl.routing.AbstractDataReadRouter;
+import org.opendaylight.controller.sal.binding.impl.DataBrokerImpl;
+import org.opendaylight.controller.sal.binding.impl.connect.dom.BindingIndependentDataServiceConnector;
+import org.opendaylight.controller.sal.binding.impl.connect.dom.BindingIndependentMappingService;
+import org.opendaylight.controller.sal.core.api.Broker;
+import org.opendaylight.controller.sal.core.api.data.DataProviderService;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.osgi.framework.BundleContext;
+
+import com.google.common.util.concurrent.MoreExecutors;
+
+/**
+*
+*/
+public final class DataBrokerImplModule extends
+ org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractDataBrokerImplModule {
+
+ private BundleContext bundleContext;
+
+ public DataBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+ org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public DataBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier,
+ org.opendaylight.controller.config.api.DependencyResolver dependencyResolver,
+ DataBrokerImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate() {
+ super.validate();
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ DataBrokerImpl dataBindingBroker = new DataBrokerImpl();
+
+ // FIXME: obtain via dependency management
+ ExecutorService executor = Executors.newCachedThreadPool();
+ ExecutorService listeningExecutor = MoreExecutors.listeningDecorator(executor);
+ dataBindingBroker.setExecutor(listeningExecutor);
+
+
+
+ Broker domBroker = getDomBrokerDependency();
+ BindingIndependentMappingService mappingService = getMappingServiceDependency();
+
+ if (domBroker != null && mappingService != null) {
+ BindingIndependentDataServiceConnector runtimeMapping = new BindingIndependentDataServiceConnector();
+ runtimeMapping.setMappingService(mappingService);
+ runtimeMapping.setBaDataService(dataBindingBroker);
+ domBroker.registerProvider(runtimeMapping, getBundleContext());
+ }
+
+ return dataBindingBroker;
+ }
+
+ public BundleContext getBundleContext() {
+ return bundleContext;
+ }
+
+ public void setBundleContext(BundleContext bundleContext2) {
+ this.bundleContext = bundleContext2;
+ }
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-data-broker
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:33:01 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public class DataBrokerImplModuleFactory extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractDataBrokerImplModuleFactory
+{
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ DataBrokerImplModule module = (DataBrokerImplModule) super.createModule(instanceName, dependencyResolver, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver,
+ DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
+ DataBrokerImplModule module = (DataBrokerImplModule) super.createModule(instanceName, dependencyResolver, old, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-notification-broker
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:33:01 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+
+import org.opendaylight.controller.sal.binding.impl.NotificationBrokerImpl;
+
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+
+/**
+*
+*/
+public final class NotificationBrokerImplModule extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractNotificationBrokerImplModule
+{
+
+ public NotificationBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NotificationBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, NotificationBrokerImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ // Add custom validation for module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ ExecutorService executor = Executors.newFixedThreadPool(5);
+ ListeningExecutorService listeningExecutor = MoreExecutors.listeningDecorator(executor);
+ NotificationBrokerImpl broker = new NotificationBrokerImpl(listeningExecutor);
+ return broker;
+ }
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-notification-broker
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:33:01 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+/**
+*
+*/
+public class NotificationBrokerImplModuleFactory extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractNotificationBrokerImplModuleFactory
+{
+
+
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-rpc-broker
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:33:01 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+/**
+*
+*/
+public final class RpcBrokerImplModule extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractRpcBrokerImplModule
+{
+
+ public RpcBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public RpcBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, RpcBrokerImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ // Add custom validation for module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ //TODO:implement
+ throw new java.lang.UnsupportedOperationException("Unimplemented stub method");
+ }
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: binding-rpc-broker
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:33:01 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+/**
+*
+*/
+public class RpcBrokerImplModuleFactory extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractRpcBrokerImplModuleFactory
+{
+
+
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: runtime-generated-mapping
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 18:20:19 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+import javassist.ClassPool;
+
+import org.opendaylight.controller.sal.binding.dom.serializer.impl.RuntimeGeneratedMappingServiceImpl;
+import org.osgi.framework.BundleContext;
+
+import com.google.common.base.Preconditions;
+
+/**
+*
+*/
+public final class RuntimeMappingModule extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractRuntimeMappingModule
+{
+
+ private BundleContext bundleContext;
+
+ public RuntimeMappingModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public RuntimeMappingModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, RuntimeMappingModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ Preconditions.checkNotNull(bundleContext);
+ // Add custom validation for module attributes here.
+ }
+
+ @Override
+ public boolean canReuseInstance(AbstractRuntimeMappingModule oldModule) {
+ return true;
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ RuntimeGeneratedMappingServiceImpl service = new RuntimeGeneratedMappingServiceImpl();
+ ClassPool pool = new ClassPool(); // Should be default singleton
+ service.setPool(pool);
+ service.start(getBundleContext());
+ return service;
+ }
+
+ private BundleContext getBundleContext() {
+ return bundleContext;
+ }
+
+ public void setBundleContext(BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
+ }
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-binding-broker-impl yang module local name: runtime-generated-mapping
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 18:20:19 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.binding.impl;
+
+import java.util.Collections;
+import java.util.Set;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DependencyResolverFactory;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public class RuntimeMappingModuleFactory extends org.opendaylight.controller.config.yang.md.sal.binding.impl.AbstractRuntimeMappingModuleFactory
+{
+
+
+ private static RuntimeMappingModule SINGLETON = null;
+ private static ModuleIdentifier IDENTIFIER = new ModuleIdentifier(NAME, "runtime-mapping-singleton");
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ throw new UnsupportedOperationException("Only default instance supported");
+ }
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver,
+ DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
+ RuntimeMappingModule module = (RuntimeMappingModule) super.createModule(instanceName, dependencyResolver, old, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+ @Override
+ public Set<RuntimeMappingModule> getDefaultModules(DependencyResolverFactory dependencyResolverFactory,
+ BundleContext bundleContext) {
+ if(SINGLETON == null) {
+ DependencyResolver dependencyResolver = dependencyResolverFactory.createDependencyResolver(IDENTIFIER);
+ SINGLETON = new RuntimeMappingModule(IDENTIFIER , dependencyResolver);
+ SINGLETON.setBundleContext(bundleContext);
+ }
+
+
+ return Collections.singleton(SINGLETON);
+ }
+
+}
}
- override getRoute(InstanceIdentifier nodeInstance) {
+ override getRoute(InstanceIdentifier<? extends Object> nodeInstance) {
val ret = routes.get(nodeInstance);
if(ret !== null) {
return ret;
@SuppressWarnings("rawtypes")
override updateRoute(InstanceIdentifier<? extends Object> path, S service) {
- routes.put(path as InstanceIdentifier,service);
+ routes.put(path as InstanceIdentifier<? extends DataObject>,service);
}
}
\ No newline at end of file
import org.opendaylight.yangtools.yang.binding.RpcService
import javassist.CtClass
-import static com.google.common.base.Preconditions.*
import javassist.CtMethod
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier
import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext
}
override <T extends RpcService> getRouterFor(Class<T> iface) {
- val contexts = new HashSet<Class<? extends BaseIdentity>>
-
- val instance = <RpcRouterCodegenInstance<T>>withClassLoader(iface.classLoader) [ |
+ val instance = <RpcRouterCodegenInstance<T>>withClassLoaderAndLock(iface.classLoader,lock) [ |
val supertype = iface.asCtClass
val metadata = supertype.rpcMetadata;
val targetCls = createClass(iface.routerName, supertype) [
]
val finalClass = targetCls.toClass(iface.classLoader, iface.protectionDomain)
return new RuntimeGeneratedInvokerPrototype(supportedNotification,
- finalClass as Class<? extends org.opendaylight.controller.sal.binding.api.NotificationListener>);
+ finalClass as Class<? extends org.opendaylight.controller.sal.binding.api.NotificationListener<?>>);
}
protected def resolveInvokerClass(Class<? extends NotificationListener> class1) {
- val invoker = invokerClasses.get(class1);
- if (invoker !== null) {
- return invoker;
- }
- val newInvoker = generateListenerInvoker(class1);
- invokerClasses.put(class1, newInvoker);
- return newInvoker
+ return <RuntimeGeneratedInvokerPrototype>withClassLoaderAndLock(class1.classLoader,lock) [|
+ val invoker = invokerClasses.get(class1);
+ if (invoker !== null) {
+ return invoker;
+ }
+ val newInvoker = generateListenerInvoker(class1);
+ invokerClasses.put(class1, newInvoker);
+ return newInvoker
+
+ ]
}
}
val NotificationListener delegate;
@Property
- var org.opendaylight.controller.sal.binding.api.NotificationListener invocationProxy;
+ var org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> invocationProxy;
@Property
var RuntimeGeneratedInvokerPrototype prototype;
new(NotificationListener delegate, RuntimeGeneratedInvokerPrototype prototype) {
_delegate = delegate;
_prototype = prototype;
- _invocationProxy = prototype.protoClass.newInstance;
+ _invocationProxy = prototype.protoClass.newInstance as org.opendaylight.controller.sal.binding.api.NotificationListener<Notification>;
RuntimeCodeHelper.setDelegate(_invocationProxy, delegate);
}
val Set<Class<? extends Notification>> supportedNotifications;
@Property
- val Class<? extends org.opendaylight.controller.sal.binding.api.NotificationListener> protoClass;
+ val Class<? extends org.opendaylight.controller.sal.binding.api.NotificationListener<?>> protoClass;
}
package class RpcServiceMetadata {
void onBindingClassCaptured(Class<?> cls);
+ void onBindingClassProcessed(Class<?> cls);
}
}
}
- public static void setClassToCaseMap(Class<? extends BindingCodec> codec,
- Map<Class,BindingCodec> classToCaseRawCodec) {
+ public static void setClassToCaseMap(Class<? extends BindingCodec<?,?>> codec,
+ Map<Class<?>,BindingCodec<?,?>> classToCaseRawCodec) {
Field instanceIdField;
try {
instanceIdField = codec.getField(CLASS_TO_CASE_MAP);
}
- public static void setCompositeNodeToCaseMap(Class<? extends BindingCodec> codec,
- Map<CompositeNode,BindingCodec> compositeToCase) {
+ public static void setCompositeNodeToCaseMap(Class<? extends BindingCodec<?,?>> codec,
+ Map<CompositeNode,BindingCodec<?,?>> compositeToCase) {
Field instanceIdField;
try {
instanceIdField = codec.getField(COMPOSITE_TO_CASE);
}
public static void setAugmentationCodec(Class<? extends BindingCodec<Map<QName, Object>, Object>> dataCodec,
- BindingCodec augmentableCodec) {
+ BindingCodec<?,?> augmentableCodec) {
Field instanceIdField;
try {
instanceIdField = dataCodec.getField(AUGMENTATION_CODEC);
+ void onClassProcessed(Class<?> cl);
+
void onCodecCreated(Class<?> codec);
void onValueCodecCreated(Class<?> valueClass,Class<?> valueCodec);
void onChoiceCodecCreated(Class<?> choiceClass,Class<? extends BindingCodec<Map<QName, Object>,Object>> choiceCodec);
return new NodeIdentifier(QName.create(previousQname,qname.localName));
}
+ @SuppressWarnings("rawtypes")
private def dispatch PathArgument serializePathArgument(IdentifiableItem argument, QName previousQname) {
val Map<QName,Object> predicates = new HashMap();
val type = argument.type;
return new NodeIdentifierWithPredicates(QName.create(previousQname,qname.localName),predicates);
}
- def resolveQname(Class class1) {
+ def resolveQname(Class<?> class1) {
val qname = classToQName.get(class1);
if(qname !== null) {
return qname;
val qnameField = class1.getField("QNAME");
val qnameValue = qnameField.get(null) as QName;
classToQName.put(class1,qnameValue);
+ return qnameValue;
}
}
\ No newline at end of file
import org.opendaylight.yangtools.yang.binding.Augmentation;
import org.opendaylight.yangtools.yang.binding.BindingCodec;
import org.opendaylight.yangtools.yang.binding.DataContainer;
+import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.Identifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
ReferencedTypeImpl typeref = new ReferencedTypeImpl(type.getPackageName(), type.getName());
@SuppressWarnings("rawtypes")
WeakReference<Class> weakRef = typeToClass.get(typeref);
+ if(weakRef == null) {
+ LOG.error("Could not find loaded class for path: {} and type: {}",path,typeref.getFullyQualifiedName());
+ }
return weakRef.get();
}
@Override
@SuppressWarnings("rawtypes")
public void bindingClassEncountered(Class cls) {
+
ConcreteType typeRef = Types.typeForClass(cls);
+ if(typeToClass.containsKey(typeRef)) {
+ return;
+ }
+ LOG.info("Binding Class {} encountered.",cls);
WeakReference<Class> weakRef = new WeakReference<>(cls);
typeToClass.put(typeRef, weakRef);
+ if(DataObject.class.isAssignableFrom(cls)) {
+ @SuppressWarnings({"unchecked","unused"})
+ Object cdc = getCodecForDataObject((Class<? extends DataObject>) cls);
+ }
+ }
+
+ @Override
+ public void onClassProcessed(Class<?> cls) {
+ ConcreteType typeRef = Types.typeForClass(cls);
+ if(typeToClass.containsKey(typeRef)) {
+ return;
+ }
+ LOG.info("Binding Class {} encountered.",cls);
+ WeakReference<Class> weakRef = new WeakReference<>((Class) cls);
+ typeToClass.put(typeRef, weakRef);
}
private DataSchemaNode getSchemaNode(List<QName> path) {
ReferencedTypeImpl typeref = new ReferencedTypeImpl(caseNode.getValue().getPackageName(), caseNode
.getValue().getName());
ChoiceCaseNode node = (ChoiceCaseNode) SchemaContextUtil.findDataSchemaNode(module, caseNode.getKey());
+ if (node == null) {
+ LOG.error("YANGTools Bug: SchemaNode for {}, with path {} was not found in context.",
+ typeref.getFullyQualifiedName(), caseNode.getKey());
+ continue;
+ }
+
@SuppressWarnings("rawtypes")
ChoiceCaseCodecImpl value = new ChoiceCaseCodecImpl(node);
typeToCaseNodes.putIfAbsent(typeref, value);
BindingCodec<Map<QName, Object>, Object> delegate = newInstanceOf(choiceCodec);
ChoiceCodecImpl<?> newCodec = new ChoiceCodecImpl(delegate);
choiceCodecs.put(choiceClass, newCodec);
- CodecMapping.setClassToCaseMap(choiceCodec, (Map<Class, BindingCodec>) classToCaseRawCodec);
+ CodecMapping.setClassToCaseMap(choiceCodec, (Map<Class<?>, BindingCodec<?, ?>>) classToCaseRawCodec);
CodecMapping.setCompositeNodeToCaseMap(choiceCodec, newCodec.getCompositeToCase());
}
public BindingCodec get(Object key) {
if (key instanceof Class) {
Class cls = (Class) key;
- bindingClassEncountered(cls);
+ //bindingClassEncountered(cls);
ChoiceCaseCodecImpl caseCodec = getCaseCodecFor(cls);
return caseCodec.getDelegate();
}
this.choiceCases = choiceCases;
}
- @Override
- public Set<java.util.Map.Entry<CompositeNode, BindingCodec>> entrySet() {
- return null;
- }
-
@Override
public BindingCodec get(Object key) {
if (false == (key instanceof CompositeNode)) {
}
return null;
}
+
+
}
/**
* Key type
*/
@SuppressWarnings("rawtypes")
- private static abstract class MapFacadeBase<T> implements Map<T, BindingCodec> {
+ private static abstract class MapFacadeBase<T> implements Map<T, BindingCodec<?, ?>> {
@Override
public boolean containsKey(Object key) {
}
@Override
- public Collection<BindingCodec> values() {
+ public Collection<BindingCodec<?, ?>> values() {
return null;
}
}
@Override
- public BindingCodec<Map<QName, Object>, Object> put(T key, BindingCodec value) {
+ public BindingCodec<Map<QName, Object>, Object> put(T key, BindingCodec<?,?> value) {
throw notModifiable();
}
@Override
- public void putAll(Map<? extends T, ? extends BindingCodec> m) {
+ public void putAll(Map<? extends T, ? extends BindingCodec<?, ?>> m) {
throw notModifiable();
}
}
@Override
- public Set<java.util.Map.Entry<T, BindingCodec>> entrySet() {
+ public Set<java.util.Map.Entry<T, BindingCodec<?, ?>>> entrySet() {
+ // TODO Auto-generated method stub
return null;
}
import java.util.AbstractMap.SimpleEntry
import org.opendaylight.yangtools.yang.model.api.SchemaPath
import org.opendaylight.yangtools.yang.model.util.SchemaContextUtil
-import java.util.ArrayList
-import org.opendaylight.yangtools.yang.common.QName
import org.opendaylight.yangtools.yang.binding.DataContainer
-import static com.google.common.base.Preconditions.*;
-import java.util.List
-import org.opendaylight.yangtools.yang.data.api.Node
-import org.opendaylight.yangtools.yang.data.impl.CompositeNodeTOImpl
-import org.opendaylight.yangtools.yang.data.impl.SimpleNodeTOImpl
-import org.opendaylight.yangtools.concepts.Delegator
import java.util.concurrent.ConcurrentMap
import org.opendaylight.yangtools.sal.binding.model.api.GeneratedType
-import org.opendaylight.yangtools.yang.binding.BindingCodec
import com.google.common.collect.HashMultimap
import com.google.common.util.concurrent.SettableFuture
import java.util.concurrent.Future
import org.opendaylight.controller.sal.binding.dom.serializer.api.ValueWithQName
import org.opendaylight.controller.sal.binding.dom.serializer.api.DataContainerCodec
import org.opendaylight.yangtools.binding.generator.util.Types
+import org.osgi.framework.BundleContext
+import java.util.Hashtable
+import org.osgi.framework.ServiceRegistration
-class RuntimeGeneratedMappingServiceImpl implements BindingIndependentMappingService, SchemaServiceListener {
+class RuntimeGeneratedMappingServiceImpl implements BindingIndependentMappingService, SchemaServiceListener, AutoCloseable {
@Property
ClassPool pool;
val promisedTypeDefinitions = HashMultimap.<Type, SettableFuture<GeneratedTypeBuilder>>create;
val promisedSchemas = HashMultimap.<Type, SettableFuture<SchemaNode>>create;
+
+ ServiceRegistration<SchemaServiceListener> listenerRegistration
override onGlobalContextUpdated(SchemaContext arg0) {
recreateBindingContext(arg0);
val ret = transformer.deserialize(node)?.value as DataObject;
return ret;
}
+
+ override fromDataDom(org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry) {
+ return registry.instanceIdentifierCodec.deserialize(entry);
+ }
private def void updateBindingFor(Map<SchemaPath, GeneratedTypeBuilder> map, SchemaContext module) {
for (entry : map.entrySet) {
}
}
- public def void start() {
+ public def void start(BundleContext ctx) {
binding = new TransformerGenerator(pool);
registry = new LazyGeneratedCodecRegistry()
registry.generator = binding
binding.typeToDefinition = typeToDefinition
binding.typeToSchemaNode = typeToSchemaNode
binding.typeDefinitions = typeDefinitions
-
+ if(ctx !== null) {
+ listenerRegistration = ctx.registerService(SchemaServiceListener,this,new Hashtable<String,String>());
+ }
}
private def getTypeDefinition(Type type) {
}
promisedSchemas.removeAll(builder);
}
+
+ override close() throws Exception {
+ listenerRegistration?.unregister();
+ }
+
}
return withClassLoaderAndLock(inputType.classLoader, lock) [ |
val ret = getGeneratedClass(inputType)
if (ret !== null) {
+ listener.onClassProcessed(inputType);
return ret as Class<? extends BindingCodec<Map<QName,Object>, Object>>;
}
val ref = Types.typeForClass(inputType)
val typeSpecBuilder = typeToDefinition.get(ref)
val typeSpec = typeSpecBuilder.toInstance();
val newret = generateTransformerFor(inputType, typeSpec, node);
+ listener.onClassProcessed(inputType);
return newret as Class<? extends BindingCodec<Map<QName,Object>, Object>>;
]
}
val typeSpecBuilder = typeToDefinition.get(ref)
val typeSpec = typeSpecBuilder.toInstance();
val newret = generateAugmentationTransformerFor(inputType, typeSpec, node);
+ listener.onClassProcessed(inputType);
return newret as Class<? extends BindingCodec<Map<QName,Object>, Object>>;
]
}
]
}
- private def Class getGeneratedClass(Class<? extends Object> cls) {
+ private def Class<?> getGeneratedClass(Class<? extends Object> cls) {
try {
return loadClassWithTCCL(cls.codecClassName)
if (transformer !== null) {
return transformer;
}
- val valueTransformer = generateValueTransformer(cls, type);
- return valueTransformer;
+ return withClassLoaderAndLock(cls.classLoader,lock) [|
+ val valueTransformer = generateValueTransformer(cls, type);
+ return valueTransformer;
+ ]
}
private def generateKeyTransformerFor(Class<? extends Object> inputType, GeneratedType typeSpec, ListSchemaNode node) {
try {
- log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
+ //log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
val properties = typeSpec.allProperties;
val ctCls = createClass(inputType.codecClassName) [
//staticField(Map,"AUGMENTATION_SERIALIZERS");
_resultName = QNAME;
}
java.util.List _childNodes = new java.util.ArrayList();
- «inputType.name» value = («inputType.name») $2;
+ «inputType.resolvedName» value = («inputType.name») $2;
«FOR key : node.keyDefinition»
«val propertyName = key.getterName»
«val keyDef = node.getDataChildByName(key)»
«val property = properties.get(propertyName)»
«deserializeProperty(keyDef, property, propertyName)»;
«ENDFOR»
- «inputType.name» _value = new «inputType.name»(«node.keyDefinition.keyConstructorList»);
+ «inputType.resolvedName» _value = new «inputType.name»(«node.keyDefinition.keyConstructorList»);
return _value;
}
'''
}
}
- private def Class<? extends BindingCodec<Object, Object>> generateCaseCodec(Class inputType, GeneratedType type,
+ private def Class<? extends BindingCodec<Object, Object>> generateCaseCodec(Class<?> inputType, GeneratedType type,
ChoiceCaseNode node) {
try {
- log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
+ //log.info("Generating DOM Codec for {} with {}, TCCL is: {}", inputType, inputType.classLoader,Thread.currentThread.contextClassLoader)
val ctCls = createClass(type.codecClassName) [
//staticField(Map,"AUGMENTATION_SERIALIZERS");
implementsType(BINDING_CODEC)
«QName.name» _resultName = «QName.name».create($1,QNAME.getLocalName());
java.util.List _childNodes = new java.util.ArrayList();
«type.resolvedName» value = («type.resolvedName») $2;
- «transformDataContainerBody(type.allProperties, node)»
+ «transformDataContainerBody(type,type.allProperties, node)»
return ($r) _childNodes;
}
'''
}
private def dispatch Class<? extends BindingCodec<Map<QName, Object>, Object>> generateTransformerFor(
- Class inputType, GeneratedType typeSpec, SchemaNode node) {
+ Class<?> inputType, GeneratedType typeSpec, SchemaNode node) {
try {
- log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
+ //log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
val ctCls = createClass(typeSpec.codecClassName) [
//staticField(Map,"AUGMENTATION_SERIALIZERS");
staticQNameField(inputType);
}
private def Class<? extends BindingCodec<Map<QName, Object>, Object>> generateAugmentationTransformerFor(
- Class inputType, GeneratedType type, AugmentationSchema node) {
+ Class<?> inputType, GeneratedType type, AugmentationSchema node) {
try {
- log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
+ //log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
val properties = type.allProperties
val ctCls = createClass(type.codecClassName) [
//staticField(Map,"AUGMENTATION_SERIALIZERS");
return null;
}
java.util.Map _compositeNode = (java.util.Map) $2;
- ////System.out.println(_localQName + " " + _compositeNode);
+ //System.out.println(_localQName + " " + _compositeNode);
«type.builderName» _builder = new «type.builderName»();
«FOR child : node.childNodes»
«val signature = properties.getFor(child)»
}
private def dispatch Class<? extends BindingCodec<Map<QName, Object>, Object>> generateTransformerFor(
- Class inputType, GeneratedType typeSpec, ChoiceNode node) {
+ Class<?> inputType, GeneratedType typeSpec, ChoiceNode node) {
try {
- log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
+ //log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
val ctCls = createClass(typeSpec.codecClassName) [
//staticField(Map,"AUGMENTATION_SERIALIZERS");
//staticQNameField(inputType);
return null;
}
java.util.Map.Entry _input = new «SimpleEntry.name»($1,_baValue);
- return (java.util.List) _codec.serialize(_input);
+ Object _ret = _codec.serialize(_input);
+ //System.out.println("«typeSpec.name»#toDomStatic: " + _ret);
+ return («List.name») _ret;
}
'''
]
return null;
}
java.util.Map _compositeNode = (java.util.Map) $2;
- ////System.out.println(_localQName + " " + _compositeNode);
+ //System.out.println(_localQName + " " + _compositeNode);
«type.builderName» _builder = new «type.builderName»();
«deserializeDataNodeContainerBody(type, node)»
«deserializeAugmentations»
String propertyName) '''
java.util.List _dom_«propertyName» = _compositeNode.get(«QName.name».create(_localQName,"«schema.QName.
localName»"));
- ////System.out.println("«propertyName»#deCode"+_dom_«propertyName»);
+ //System.out.println("«propertyName»#deCode"+_dom_«propertyName»);
java.util.List «propertyName» = new java.util.ArrayList();
if(_dom_«propertyName» != null) {
java.util.List _serialized = new java.util.ArrayList();
boolean _hasNext = _iterator.hasNext();
while(_hasNext) {
Object _listItem = _iterator.next();
- ////System.out.println(" item" + _listItem);
+ //System.out.println(" item" + _listItem);
Object _value = «type.actualTypeArguments.get(0).serializer.resolvedName».fromDomStatic(_localQName,_listItem);
- ////System.out.println(" value" + _value);
+ //System.out.println(" value" + _value);
«propertyName».add(_value);
_hasNext = _iterator.hasNext();
}
}
- ////System.out.println(" list" + «propertyName»);
+ //System.out.println(" list" + «propertyName»);
'''
private def dispatch CharSequence deserializeProperty(LeafListSchemaNode schema, ParameterizedType type,
val returnType = typeSpec.valueReturnType;
if (returnType == null) {
-
val ctCls = createDummyImplementation(inputType, typeSpec);
val ret = ctCls.toClassImpl(inputType.classLoader, inputType.protectionDomain)
return ret as Class<? extends BindingCodec<Map<QName,Object>, Object>>;
}
+ var hasBinding = false;
+ try {
+ val bindingCodecClass = loadClassWithTCCL(BINDING_CODEC.name);
+ hasBinding = bindingCodecClass !== null;
+ } catch (ClassNotFoundException e) {
+ hasBinding = false;
+ }
+ val hasYangBinding = hasBinding
val ctCls = createClass(typeSpec.codecClassName) [
//staticField(Map,"AUGMENTATION_SERIALIZERS");
- implementsType(BINDING_CODEC)
- staticField(it, INSTANCE_IDENTIFIER_CODEC, BindingCodec)
- implementsType(BindingDeserializer.asCtClass)
+ if(hasYangBinding) {
+ implementsType(BINDING_CODEC)
+ staticField(it, INSTANCE_IDENTIFIER_CODEC, BindingCodec)
+ implementsType(BindingDeserializer.asCtClass)
+ }
method(Object, "toDomValue", Object) [
modifiers = PUBLIC + FINAL + STATIC
body = '''
{
- ////System.out.println("«inputType.simpleName»#toDomValue: "+$1);
+ //System.out.println("«inputType.simpleName»#toDomValue: "+$1);
if($1 == null) {
return null;
}
«typeSpec.resolvedName» _encapsulatedValue = («typeSpec.resolvedName») $1;
- ////System.out.println("«inputType.simpleName»#toDomValue:Enc: "+_encapsulatedValue);
+ //System.out.println("«inputType.simpleName»#toDomValue:Enc: "+_encapsulatedValue);
«returnType.resolvedName» _value = _encapsulatedValue.getValue();
- ////System.out.println("«inputType.simpleName»#toDomValue:DeEnc: "+_value);
+ //System.out.println("«inputType.simpleName»#toDomValue:DeEnc: "+_value);
Object _domValue = «serializeValue(returnType, "_value")»;
return _domValue;
}
modifiers = PUBLIC + FINAL + STATIC
body = '''
{
- ////System.out.println("«inputType.simpleName»#fromDomValue: "+$1);
+ //System.out.println("«inputType.simpleName»#fromDomValue: "+$1);
if($1 == null) {
return null;
return null;
}
- private def dispatch Class<? extends BindingCodec<Map<QName, Object>, Object>> generateValueTransformer(
+ private def dispatch Class<?> generateValueTransformer(
Class<?> inputType, Enumeration typeSpec) {
try {
- log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
+ //log.info("Generating DOM Codec for {} with {}", inputType, inputType.classLoader)
val ctCls = createClass(typeSpec.codecClassName) [
//staticField(Map,"AUGMENTATION_SERIALIZERS");
- implementsType(BINDING_CODEC)
+ //implementsType(BINDING_CODEC)
+
method(Object, "toDomValue", Object) [
modifiers = PUBLIC + FINAL + STATIC
body = '''
val ret = ctCls.toClassImpl(inputType.classLoader, inputType.protectionDomain)
log.info("DOM Codec for {} was generated {}", inputType, ret)
- return ret as Class<? extends BindingCodec<Map<QName,Object>, Object>>;
+ return ret;
} catch (CodeGenerationException e) {
throw new CodeGenerationException("Cannot compile Transformator for " + inputType, e);
} catch (Exception e) {
«QName.name» _resultName = «QName.name».create($1,QNAME.getLocalName());
java.util.List _childNodes = new java.util.ArrayList();
«type.resolvedName» value = («type.resolvedName») $2;
- «transformDataContainerBody(type.allProperties, node)»
+ «transformDataContainerBody(type,type.allProperties, node)»
«serializeAugmentations»
return ($r) java.util.Collections.singletonMap(_resultName,_childNodes);
}
«QName.name» _resultName = «QName.name».create($1,QNAME.getLocalName());
java.util.List _childNodes = new java.util.ArrayList();
«type.resolvedName» value = («type.resolvedName») $2;
- «transformDataContainerBody(type.allProperties, node)»
+ «transformDataContainerBody(type,type.allProperties, node)»
«serializeAugmentations»
return ($r) java.util.Collections.singletonMap(_resultName,_childNodes);
}
«QName.name» _resultName = «QName.name».create($1,QNAME.getLocalName());
java.util.List _childNodes = new java.util.ArrayList();
«type.resolvedName» value = («type.resolvedName») $2;
- «transformDataContainerBody(type.allProperties, node)»
+ «transformDataContainerBody(type,type.allProperties, node)»
«serializeAugmentations»
return ($r) java.util.Collections.singletonMap(_resultName,_childNodes);
}
}
'''
- private def transformDataContainerBody(Map<String, Type> properties, DataNodeContainer node) {
+ private def transformDataContainerBody(Type type,Map<String, Type> properties, DataNodeContainer node) {
val ret = '''
«FOR child : node.childNodes.filter[!augmenting]»
«var signature = properties.getFor(child)»
- //System.out.println("«signature.key»" + value.«signature.key»());
+ //System.out.println("«type.name»#«signature.key»" + value.«signature.key»());
«serializeProperty(child, signature.value, signature.key)»
«ENDFOR»
'''
return '''«typeSpec.resolvedName»$Broker$Codec$DOM'''
}
- private def codecClassName(Class typeSpec) {
+ private def codecClassName(Class<?> typeSpec) {
return '''«typeSpec.name»$Broker$Codec$DOM'''
}
- private def dispatch HashMap<String, Type> getAllProperties(GeneratedType type) {
+ private def HashMap<String, Type> getAllProperties(GeneratedType type) {
val ret = new HashMap<String, Type>();
type.collectAllProperties(ret);
return ret;
return type.asCtClass.name;
}
- def String getResolvedName(Class type) {
+ def String getResolvedName(Class<?> type) {
return type.asCtClass.name;
}
def CtClass asCtClass(Type type) {
- val name = type.fullyQualifiedName
val cls = loadClassWithTCCL(type.fullyQualifiedName)
return cls.asCtClass;
}
import java.util.concurrent.Executors
import java.util.Collections
import org.opendaylight.yangtools.yang.binding.DataObject
-import org.opendaylight.controller.sal.binding.impl.connect.dom.ConnectorActivator
+import java.util.concurrent.locks.ReentrantLock
+import java.util.concurrent.Callable
+import java.util.WeakHashMap
+import javax.annotation.concurrent.GuardedBy
class BindingAwareBrokerImpl implements BindingAwareBroker, AutoCloseable {
private static val log = LoggerFactory.getLogger(BindingAwareBrokerImpl)
-
private InstanceIdentifier<? extends DataObject> root = InstanceIdentifier.builder().toInstance();
- private val clsPool = ClassPool.getDefault()
- private var RuntimeCodeGenerator generator;
-
+ private static val clsPool = ClassPool.getDefault()
+ public static var RuntimeCodeGenerator generator;
/**
* Map of all Managed Direct Proxies
*
*
*/
- private val Map<Class<? extends RpcService>, RpcRouter<? extends RpcService>> rpcRouters = new ConcurrentHashMap();
+ private val Map<Class<? extends RpcService>, RpcRouter<? extends RpcService>> rpcRouters = new WeakHashMap();
@Property
- private var NotificationBrokerImpl notifyBroker
-
+ private var NotificationProviderService notifyBroker
+
@Property
- private var DataBrokerImpl dataBroker
-
+ private var DataProviderService dataBroker
+
@Property
var BundleContext brokerBundleContext
-
+
ServiceRegistration<NotificationProviderService> notifyProviderRegistration
-
+
ServiceRegistration<NotificationService> notifyConsumerRegistration
-
+
ServiceRegistration<DataProviderService> dataProviderRegistration
-
+
ServiceRegistration<DataBrokerService> dataConsumerRegistration
-
- ConnectorActivator connectorActivator
-
-
+
+ private val proxyGenerationLock = new ReentrantLock;
+
+ private val routerGenerationLock = new ReentrantLock;
+
public new(BundleContext bundleContext) {
_brokerBundleContext = bundleContext;
}
initGenerator();
val executor = Executors.newCachedThreadPool;
+
// Initialization of notificationBroker
log.info("Starting MD-SAL: Binding Aware Notification Broker");
- notifyBroker = new NotificationBrokerImpl(executor);
- notifyBroker.invokerFactory = generator.invokerFactory;
log.info("Starting MD-SAL: Binding Aware Data Broker");
- dataBroker = new DataBrokerImpl();
- dataBroker.executor = executor;
- val brokerProperties = newProperties();
-
-
log.info("Starting MD-SAL: Binding Aware Data Broker");
- notifyProviderRegistration = brokerBundleContext.registerService(NotificationProviderService, notifyBroker,
- brokerProperties)
- notifyConsumerRegistration = brokerBundleContext.registerService(NotificationService, notifyBroker, brokerProperties)
- dataProviderRegistration = brokerBundleContext.registerService(DataProviderService, dataBroker, brokerProperties)
- dataConsumerRegistration = brokerBundleContext.registerService(DataBrokerService, dataBroker, brokerProperties)
-
- connectorActivator = new ConnectorActivator(dataBroker,brokerBundleContext);
- connectorActivator.start();
log.info("MD-SAL: Binding Aware Broker Started");
}
* If proxy class does not exist for supplied service class it will be generated automatically.
*/
private def <T extends RpcService> getManagedDirectProxy(Class<T> service) {
-
var RpcProxyContext existing = null
+
if ((existing = managedProxies.get(service)) != null) {
return existing.proxy
}
- val proxyInstance = generator.getDirectProxyFor(service)
- val rpcProxyCtx = new RpcProxyContext(proxyInstance.class)
- val properties = new Hashtable<String, String>()
- rpcProxyCtx.proxy = proxyInstance as RpcService
-
- properties.salServiceType = SAL_SERVICE_TYPE_CONSUMER_PROXY
- rpcProxyCtx.registration = brokerBundleContext.registerService(service, rpcProxyCtx.proxy as T, properties)
- managedProxies.put(service, rpcProxyCtx)
- return rpcProxyCtx.proxy
+ return withLock(proxyGenerationLock) [ |
+ val maybeProxy = managedProxies.get(service);
+ if (maybeProxy !== null) {
+ return maybeProxy.proxy;
+ }
+
+
+ val proxyInstance = generator.getDirectProxyFor(service)
+ val rpcProxyCtx = new RpcProxyContext(proxyInstance.class)
+ val properties = new Hashtable<String, String>()
+ rpcProxyCtx.proxy = proxyInstance as RpcService
+ properties.salServiceType = SAL_SERVICE_TYPE_CONSUMER_PROXY
+ rpcProxyCtx.registration = brokerBundleContext.registerService(service, rpcProxyCtx.proxy as T, properties)
+ managedProxies.put(service, rpcProxyCtx)
+ return rpcProxyCtx.proxy
+ ]
+ }
+
+ private static def <T> T withLock(ReentrantLock lock, Callable<T> method) {
+ try {
+ lock.lock();
+ val ret = method.call;
+ return ret;
+ } finally {
+ lock.unlock();
+ }
}
/**
val osgiReg = context.bundleContext.registerService(type, service, properties);
proxy.delegate = service;
- return new RpcServiceRegistrationImpl<T>(type, service, osgiReg,this);
+ return new RpcServiceRegistrationImpl<T>(type, service, osgiReg, this);
}
def <T extends RpcService> RoutedRpcRegistration<T> registerRoutedRpcImplementation(Class<T> type, T service,
}
// We created Router
- val newRouter = generator.getRouterFor(type);
- checkState(newRouter !== null);
- rpcRouters.put(type, newRouter);
-
- // We create / update Direct Proxy for router
- val proxy = getManagedDirectProxy(type);
- proxy.delegate = newRouter.invocationProxy
- return newRouter;
+ return withLock(routerGenerationLock) [ |
+ val maybeRouter = rpcRouters.get(type);
+ if (maybeRouter !== null) {
+ return maybeRouter as RpcRouter<T>;
+ }
+
+ val newRouter = generator.getRouterFor(type);
+ checkState(newRouter !== null);
+ rpcRouters.put(type, newRouter);
+ // We create / update Direct Proxy for router
+ val proxy = getManagedDirectProxy(type);
+ proxy.delegate = newRouter.invocationProxy
+ return newRouter;
+ ]
}
// Updating internal structure of registration
routingTable.updateRoute(path, registration.instance)
+
// Update routing table / send announce to message bus
-
val success = paths.put(context, path);
}
routingTable.removeRoute(path)
}
}
-
+
protected def <T extends RpcService> void unregisterRpcService(RpcServiceRegistrationImpl<T> registration) {
val type = registration.serviceType;
-
+
val proxy = managedProxies.get(type);
- if(proxy.proxy.delegate === registration.instance) {
+ if (proxy.proxy.delegate === registration.instance) {
proxy.proxy.delegate = null;
}
}
-
+
def createDelegate(Class<? extends RpcService> type) {
getManagedDirectProxy(type);
}
-
+
def getRpcRouters() {
return Collections.unmodifiableMap(rpcRouters);
}
-
+
override close() {
dataConsumerRegistration.unregister()
dataProviderRegistration.unregister()
notifyConsumerRegistration.unregister()
notifyProviderRegistration.unregister()
}
-
+
}
class RoutedRpcRegistrationImpl<T extends RpcService> extends AbstractObjectRegistration<T> implements RoutedRpcRegistration<T> {
checkClosed()
broker.unregisterPath(this, context, path);
}
-
+
override getServiceType() {
return router.serviceType;
}
}
}
-class RpcServiceRegistrationImpl<T extends RpcService> extends AbstractObjectRegistration<T> implements RpcRegistration<T> {
+
+class RpcServiceRegistrationImpl<T extends RpcService> extends AbstractObjectRegistration<T> implements RpcRegistration<T> {
val ServiceRegistration<T> osgiRegistration;
private var BindingAwareBrokerImpl broker;
-
+
@Property
val Class<T> serviceType;
- public new(Class<T> type, T service, ServiceRegistration<T> osgiReg,BindingAwareBrokerImpl broker) {
+ public new(Class<T> type, T service, ServiceRegistration<T> osgiReg, BindingAwareBrokerImpl broker) {
super(service);
this._serviceType = type;
this.osgiRegistration = osgiReg;
- this.broker= broker;
+ this.broker = broker;
}
override protected removeRegistration() {
broker.unregisterRpcService(this);
broker = null;
}
-
+
}
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
import org.opendaylight.controller.md.sal.common.api.data.DataReader;
import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataBroker;
public class DataBrokerImpl extends AbstractDataBroker<InstanceIdentifier<? extends DataObject>, DataObject, DataChangeListener> implements
- DataProviderService {
+ DataProviderService, AutoCloseable {
+
+
+ private final AtomicLong nextTransaction = new AtomicLong();
+
public DataBrokerImpl() {
setDataReadRouter(new BindingAwareDataReaderRouter());
}
@Override
public DataTransactionImpl beginTransaction() {
- return new DataTransactionImpl(this);
+ String transactionId = "BA-" + nextTransaction.getAndIncrement();
+ return new DataTransactionImpl(transactionId,this);
}
@Override
+ @Deprecated
public <T extends DataRoot> T getData(DataStoreIdentifier store, Class<T> rootType) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public <T extends DataRoot> T getData(DataStoreIdentifier store, T filter) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public <T extends DataRoot> T getCandidateData(DataStoreIdentifier store, Class<T> rootType) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public <T extends DataRoot> T getCandidateData(DataStoreIdentifier store, T filter) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public RpcResult<DataRoot> editCandidateData(DataStoreIdentifier store, DataRoot changeSet) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public Future<RpcResult<Void>> commit(DataStoreIdentifier store) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public DataObject getData(InstanceIdentifier<? extends DataObject> data) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public DataObject getConfigurationData(InstanceIdentifier<?> data) {
- // TODO Auto-generated method stub
- return null;
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public void registerChangeListener(InstanceIdentifier<? extends DataObject> path, DataChangeListener changeListener) {
- // TODO Auto-generated method stub
-
+ throw new UnsupportedOperationException("Deprecated");
}
@Override
+ @Deprecated
public void unregisterChangeListener(InstanceIdentifier<? extends DataObject> path,
DataChangeListener changeListener) {
- // TODO Auto-generated method stub
-
+ throw new UnsupportedOperationException("Deprecated");
}
-
+ @Override
+ public void close() throws Exception {
+
+ }
}
\ No newline at end of file
- public DataTransactionImpl(DataBrokerImpl dataBroker) {
- super(dataBroker);
+ public DataTransactionImpl(Object identifier,DataBrokerImpl dataBroker) {
+ super(identifier,dataBroker);
}
@Override
import org.slf4j.LoggerFactory
import java.util.concurrent.Callable
-class NotificationBrokerImpl implements NotificationProviderService {
+class NotificationBrokerImpl implements NotificationProviderService, AutoCloseable {
val Multimap<Class<? extends Notification>, NotificationListener<?>> listeners;
@Property
var ExecutorService executor;
- @Property
- var RuntimeCodeGenerator generator;
-
- @Property
- var NotificationInvokerFactory invokerFactory;
-
new(ExecutorService executor) {
listeners = HashMultimap.create()
this.executor = executor;
override registerNotificationListener(
org.opendaylight.yangtools.yang.binding.NotificationListener listener) {
- val invoker = invokerFactory.invokerFor(listener);
+ val invoker = BindingAwareBrokerImpl.generator.invokerFactory.invokerFor(listener);
for (notifyType : invoker.supportedNotifications) {
listeners.put(notifyType, invoker.invocationProxy)
}
listeners.remove(notifyType, reg.invoker.invocationProxy)
}
}
+
+ override close() {
+ //FIXME: implement properly.
+ }
+
}
class GenericNotificationRegistration<T extends Notification> extends AbstractObjectRegistration<NotificationListener<T>> implements ListenerRegistration<NotificationListener<T>> {
override call() {
try {
+ log.info("Delivering notification {} to {}",notification,listener);
listener.onNotification(notification);
+ log.info("Notification delivered {} to {}",notification,listener);
} catch (Exception e) {
log.error("Unhandled exception thrown by listener: {}", listener, e);
}
package org.opendaylight.controller.sal.binding.impl.connect.dom;
+import java.util.Collection;
import java.util.Collections;
+import java.util.Map;
import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
-import org.opendaylight.controller.config.api.jmx.CommitStatus;
+import org.opendaylight.controller.md.sal.common.api.RegistrationListener;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.md.sal.common.api.data.DataReader;
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandlerRegistration;
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.opendaylight.controller.sal.binding.api.data.RuntimeDataProvider;
import org.opendaylight.controller.sal.common.util.Rpcs;
-import org.opendaylight.controller.sal.core.api.data.DataBrokerService;
+import org.opendaylight.controller.sal.core.api.Provider;
+import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
import org.opendaylight.controller.sal.core.api.data.DataModificationTransaction;
+import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-
-import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class BindingIndependentDataServiceConnector implements //
RuntimeDataProvider, //
- DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> {
+ Provider {
+
+ private final Logger LOG = LoggerFactory.getLogger(BindingIndependentDataServiceConnector.class);
private static final InstanceIdentifier<? extends DataObject> ROOT = InstanceIdentifier.builder().toInstance();
+ private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier ROOT_BI = org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
+ .builder().toInstance();
+
private BindingIndependentMappingService mappingService;
- private DataBrokerService biDataService;
+ private org.opendaylight.controller.sal.core.api.data.DataProviderService biDataService;
private DataProviderService baDataService;
+ private ConcurrentMap<Object, BindingToDomTransaction> domOpenedTransactions = new ConcurrentHashMap<>();
+ private ConcurrentMap<Object, DomToBindingTransaction> bindingOpenedTransactions = new ConcurrentHashMap<>();
+
+ private BindingToDomCommitHandler bindingToDomCommitHandler = new BindingToDomCommitHandler();
+ private DomToBindingCommitHandler domToBindingCommitHandler = new DomToBindingCommitHandler();
+
+ private Registration<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> baCommitHandlerRegistration;
+
+ private Registration<DataCommitHandler<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode>> biCommitHandlerRegistration;
+
@Override
public DataObject readOperationalData(InstanceIdentifier<? extends DataObject> path) {
- // TODO Auto-generated method stub
org.opendaylight.yangtools.yang.data.api.InstanceIdentifier biPath = mappingService.toDataDom(path);
CompositeNode result = biDataService.readOperationalData(biPath);
return mappingService.dataObjectFromDataDom(path, result);
return mappingService.dataObjectFromDataDom(path, result);
}
- @Override
- public org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction<InstanceIdentifier<? extends DataObject>, DataObject> requestCommit(
- DataModification<InstanceIdentifier<? extends DataObject>, DataObject> modification) {
-
- DataModificationTransaction translated = translateTransaction(modification);
- return new WrappedTransaction(translated, modification);
- }
-
- private DataModificationTransaction translateTransaction(
+ private DataModificationTransaction createBindingToDomTransaction(
DataModification<InstanceIdentifier<? extends DataObject>, DataObject> source) {
DataModificationTransaction target = biDataService.beginTransaction();
for (Entry<InstanceIdentifier<? extends DataObject>, DataObject> entry : source.getUpdatedConfigurationData()
.toDataDom(entry);
target.putOperationalData(biEntry.getKey(), biEntry.getValue());
}
- for(InstanceIdentifier<? extends DataObject> entry : source.getRemovedConfigurationData()) {
+ for (InstanceIdentifier<? extends DataObject> entry : source.getRemovedConfigurationData()) {
org.opendaylight.yangtools.yang.data.api.InstanceIdentifier biEntry = mappingService.toDataDom(entry);
target.removeConfigurationData(biEntry);
}
- for(InstanceIdentifier<? extends DataObject> entry : source.getRemovedOperationalData()) {
+ for (InstanceIdentifier<? extends DataObject> entry : source.getRemovedOperationalData()) {
org.opendaylight.yangtools.yang.data.api.InstanceIdentifier biEntry = mappingService.toDataDom(entry);
target.removeOperationalData(biEntry);
}
return target;
}
- private class WrappedTransaction implements
+ private org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction createDomToBindingTransaction(
+ DataModification<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> source) {
+ org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction target = baDataService
+ .beginTransaction();
+ for (Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> entry : source
+ .getUpdatedConfigurationData().entrySet()) {
+ InstanceIdentifier<?> baKey = mappingService.fromDataDom(entry.getKey());
+ DataObject baData = mappingService.dataObjectFromDataDom(baKey, entry.getValue());
+ target.putConfigurationData(baKey, baData);
+ }
+ for (Entry<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> entry : source
+ .getUpdatedOperationalData().entrySet()) {
+ InstanceIdentifier<?> baKey = mappingService.fromDataDom(entry.getKey());
+ DataObject baData = mappingService.dataObjectFromDataDom(baKey, entry.getValue());
+ target.putOperationalData(baKey, baData);
+ }
+ for (org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry : source.getRemovedConfigurationData()) {
+ InstanceIdentifier<?> baEntry = mappingService.fromDataDom(entry);
+ target.removeConfigurationData(baEntry);
+ }
+ for (org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry : source.getRemovedOperationalData()) {
+ InstanceIdentifier<?> baEntry = mappingService.fromDataDom(entry);
+ target.removeOperationalData(baEntry);
+ }
+ return target;
+ }
+
+ public org.opendaylight.controller.sal.core.api.data.DataProviderService getBiDataService() {
+ return biDataService;
+ }
+
+ public void setBiDataService(org.opendaylight.controller.sal.core.api.data.DataProviderService biDataService) {
+ this.biDataService = biDataService;
+ }
+
+ public DataProviderService getBaDataService() {
+ return baDataService;
+ }
+
+ public void setBaDataService(DataProviderService baDataService) {
+ this.baDataService = baDataService;
+ }
+
+ public void start() {
+ baDataService.registerDataReader(ROOT, this);
+ baCommitHandlerRegistration = baDataService.registerCommitHandler(ROOT, bindingToDomCommitHandler);
+ biCommitHandlerRegistration = biDataService.registerCommitHandler(ROOT_BI, domToBindingCommitHandler);
+ baDataService.registerCommitHandlerListener(domToBindingCommitHandler);
+ }
+
+ public void setMappingService(BindingIndependentMappingService mappingService) {
+ this.mappingService = mappingService;
+ }
+
+ @Override
+ public Collection<ProviderFunctionality> getProviderFunctionality() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public void onSessionInitiated(ProviderSession session) {
+ setBiDataService(session.getService(org.opendaylight.controller.sal.core.api.data.DataProviderService.class));
+ start();
+ }
+
+ private class DomToBindingTransaction implements
+ DataCommitTransaction<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> {
+
+ private final org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction backing;
+ private final DataModification<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> modification;
+
+ public DomToBindingTransaction(
+ org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction backing,
+ DataModification<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> modification) {
+ super();
+ this.backing = backing;
+ this.modification = modification;
+ bindingOpenedTransactions.put(backing.getIdentifier(), this);
+ }
+
+ @Override
+ public DataModification<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> getModification() {
+ return modification;
+ }
+
+ @Override
+ public RpcResult<Void> rollback() throws IllegalStateException {
+ // backing.cancel();
+ return Rpcs.<Void> getRpcResult(true, null, Collections.<RpcError> emptySet());
+ }
+
+ @Override
+ public RpcResult<Void> finish() throws IllegalStateException {
+ Future<RpcResult<TransactionStatus>> result = backing.commit();
+ try {
+ RpcResult<TransactionStatus> baResult = result.get();
+ return Rpcs.<Void> getRpcResult(baResult.isSuccessful(), null, baResult.getErrors());
+ } catch (InterruptedException e) {
+ throw new IllegalStateException("", e);
+ } catch (ExecutionException e) {
+ throw new IllegalStateException("", e);
+ }
+ }
+ }
+
+ private class BindingToDomTransaction implements
DataCommitTransaction<InstanceIdentifier<? extends DataObject>, DataObject> {
private DataModificationTransaction backing;
private DataModification<InstanceIdentifier<? extends DataObject>, DataObject> modification;
- public WrappedTransaction(DataModificationTransaction backing,
+ public BindingToDomTransaction(DataModificationTransaction backing,
DataModification<InstanceIdentifier<? extends DataObject>, DataObject> modification) {
this.backing = backing;
this.modification = modification;
+ domOpenedTransactions.put(backing.getIdentifier(), this);
}
@Override
public RpcResult<Void> finish() throws IllegalStateException {
Future<RpcResult<TransactionStatus>> result = backing.commit();
try {
- RpcResult<TransactionStatus> biresult = result.get();
+ RpcResult<TransactionStatus> biResult = result.get();
+ return Rpcs.<Void> getRpcResult(biResult.isSuccessful(), null, biResult.getErrors());
} catch (InterruptedException e) {
throw new IllegalStateException("", e);
} catch (ExecutionException e) {
throw new IllegalStateException("", e);
+ } finally {
+ domOpenedTransactions.remove(backing.getIdentifier());
}
- return Rpcs.<Void> getRpcResult(true, null, Collections.<RpcError> emptySet());
}
@Override
public RpcResult<Void> rollback() throws IllegalStateException {
- // backing.cancel();
+ domOpenedTransactions.remove(backing.getIdentifier());
return Rpcs.<Void> getRpcResult(true, null, Collections.<RpcError> emptySet());
}
-
}
- public DataBrokerService getBiDataService() {
- return biDataService;
- }
+ private class BindingToDomCommitHandler implements
+ DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> {
- public void setBiDataService(DataBrokerService biDataService) {
- this.biDataService = biDataService;
- }
+ @Override
+ public org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction<InstanceIdentifier<? extends DataObject>, DataObject> requestCommit(
+ DataModification<InstanceIdentifier<? extends DataObject>, DataObject> bindingTransaction) {
- public DataProviderService getBaDataService() {
- return baDataService;
- }
+ /**
+ * The transaction was created as a DOM transaction; in that case
+ * we do not need to forward it back.
+ */
+ if (bindingOpenedTransactions.containsKey(bindingTransaction.getIdentifier())) {
- public void setBaDataService(DataProviderService baDataService) {
- this.baDataService = baDataService;
+ return CommitHandlersTransactions.allwaysSuccessfulTransaction(bindingTransaction);
+ }
+ DataModificationTransaction domTransaction = createBindingToDomTransaction(bindingTransaction);
+ BindingToDomTransaction wrapped = new BindingToDomTransaction(domTransaction, bindingTransaction);
+ LOG.info("Forwarding Binding Transaction: {} as DOM Transaction: {} .", bindingTransaction.getIdentifier(),
+ domTransaction.getIdentifier());
+ return wrapped;
+ }
}
- public void start() {
- baDataService.registerDataReader(ROOT, this);
- baDataService.registerCommitHandler(ROOT, this);
- }
+ private class DomToBindingCommitHandler implements //
+ RegistrationListener<DataCommitHandlerRegistration<InstanceIdentifier<?>, DataObject>>, //
+ DataCommitHandler<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> {
- public void setMappingService(BindingIndependentMappingService mappingService) {
- this.mappingService = mappingService;
- }
+ @Override
+ public void onRegister(DataCommitHandlerRegistration<InstanceIdentifier<?>, DataObject> registration) {
+
+ org.opendaylight.yangtools.yang.data.api.InstanceIdentifier domPath = mappingService.toDataDom(registration.getPath());
+ // FIXME: register based only on active commit handlers.
+
+ }
+ @Override
+ public void onUnregister(DataCommitHandlerRegistration<InstanceIdentifier<?>, DataObject> registration) {
+ // NOOP for now
+ // FIXME: register based only on active commit handlers.
+ }
+
+ public org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> requestCommit(
+ DataModification<org.opendaylight.yangtools.yang.data.api.InstanceIdentifier, CompositeNode> domTransaction) {
+ Object identifier = domTransaction.getIdentifier();
+
+ /**
+ * We check whether the transaction originated in this mapper. If it
+ * did, we return an always-successful commit handler to prevent
+ * creating a loop in the two-phase commit and duplicating data.
+ */
+ if (domOpenedTransactions.containsKey(identifier)) {
+ return CommitHandlersTransactions.allwaysSuccessfulTransaction(domTransaction);
+ }
+
+ org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction baTransaction = createDomToBindingTransaction(domTransaction);
+ DomToBindingTransaction forwardedTransaction = new DomToBindingTransaction(baTransaction, domTransaction);
+ LOG.info("Forwarding DOM Transaction: {} as Binding Transaction: {}.", domTransaction.getIdentifier(),
+ baTransaction.getIdentifier());
+ return forwardedTransaction;
+ }
+ }
}
org.opendaylight.yangtools.yang.data.api.InstanceIdentifier toDataDom(InstanceIdentifier<? extends DataObject> path);
DataObject dataObjectFromDataDom(InstanceIdentifier<? extends DataObject> path, CompositeNode result);
-
-
+
+ InstanceIdentifier<?> fromDataDom(org.opendaylight.yangtools.yang.data.api.InstanceIdentifier entry);
+
}
--- /dev/null
+package org.opendaylight.controller.sal.binding.impl.connect.dom;
+
+import java.util.Collections;
+
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
+import org.opendaylight.controller.sal.common.util.Rpcs;
+import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+public class CommitHandlersTransactions {
+
+ private static class AllwaysSuccessfulTransaction<P,D> implements DataCommitTransaction<P, D> {
+
+ private final DataModification<P, D> modification;
+
+ public AllwaysSuccessfulTransaction(DataModification<P, D> modification) {
+ this.modification = modification;
+ }
+ @Override
+ public RpcResult<Void> rollback() throws IllegalStateException {
+ return Rpcs.<Void>getRpcResult(true, null, Collections.<RpcError>emptyList());
+ }
+ @Override
+ public RpcResult<Void> finish() throws IllegalStateException {
+ return Rpcs.<Void>getRpcResult(true, null, Collections.<RpcError>emptyList());
+ }
+
+ @Override
+ public DataModification<P, D> getModification() {
+ return modification;
+ }
+ }
+
+
+ public static final <P extends Path<P>,D> AllwaysSuccessfulTransaction<P, D> allwaysSuccessfulTransaction(DataModification<P, D> modification) {
+ return new AllwaysSuccessfulTransaction<>(modification);
+ }
+}
+++ /dev/null
-package org.opendaylight.controller.sal.binding.impl.connect.dom;
-
-import java.util.Collection;
-import java.util.Collections;
-
-import javassist.ClassPool;
-
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
-import org.opendaylight.controller.sal.binding.dom.serializer.impl.RuntimeGeneratedMappingServiceImpl;
-import org.opendaylight.controller.sal.binding.dom.serializer.impl.TransformerGenerator;
-import org.opendaylight.controller.sal.core.api.Broker;
-import org.opendaylight.controller.sal.core.api.Provider;
-import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
-import org.opendaylight.controller.sal.core.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.core.api.model.SchemaService;
-import org.osgi.framework.BundleContext;
-import org.osgi.framework.ServiceReference;
-import org.osgi.util.tracker.ServiceTracker;
-import org.osgi.util.tracker.ServiceTrackerCustomizer;
-
-public class ConnectorActivator implements Provider, ServiceTrackerCustomizer<Broker, Broker> {
-
- BindingIndependentDataServiceConnector dataConnector;
- BindingIndependentMappingService mappingService;
-
- private final DataProviderService baDataService;
- private BundleContext context;
-
- private ServiceTracker<Broker, Broker> brokerTracker;
-
- public ConnectorActivator(DataProviderService dataService, BundleContext context) {
- baDataService = dataService;
- this.context = context;
- brokerTracker = new ServiceTracker<>(context, Broker.class, this);
- }
-
- @Override
- public Collection<ProviderFunctionality> getProviderFunctionality() {
- return Collections.emptySet();
- }
-
- @Override
- public void onSessionInitiated(ProviderSession session) {
-
- RuntimeGeneratedMappingServiceImpl mappingImpl = new RuntimeGeneratedMappingServiceImpl();
- mappingImpl.setPool(new ClassPool());
- SchemaService schemaService = (session.getService(SchemaService.class));
- ClassPool pool = new ClassPool();
- mappingImpl.setBinding(new TransformerGenerator(pool));
- mappingImpl.start();
- schemaService.registerSchemaServiceListener(mappingImpl);
- mappingService = mappingImpl;
- dataConnector = new BindingIndependentDataServiceConnector();
- dataConnector.setBaDataService(baDataService);
- dataConnector.setBiDataService(session.getService(DataBrokerService.class));
- dataConnector.setMappingService(mappingService);
- dataConnector.start();
- }
-
- @Override
- public Broker addingService(ServiceReference<Broker> reference) {
- Broker br= context.getService(reference);
- br.registerProvider(this, context);
- return br;
- }
-
- @Override
- public void modifiedService(ServiceReference<Broker> reference, Broker service) {
- // NOOP
- }
-
- @Override
- public void removedService(ServiceReference<Broker> reference, Broker service) {
- // NOOP
- }
-
- public void start() {
- brokerTracker.open();
- }
-}
--- /dev/null
+package org.opendaylight.controller.sal.binding.impl.connect.dom;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
+import org.opendaylight.yangtools.concepts.Path;
+
+import com.google.common.util.concurrent.JdkFutureAdapters;
+
+public final class DataModificationTracker<P extends Path<P>,D> {
+
+ ConcurrentMap<Object, DataModification<P,D>> trackedTransactions = new ConcurrentHashMap<>();
+
+
+ public void startTrackingModification(DataModification<P,D> modification) {
+ trackedTransactions.putIfAbsent(modification.getIdentifier(), modification);
+
+
+ }
+
+ public boolean containsIdentifier(Object identifier) {
+ return trackedTransactions.containsKey(identifier);
+ }
+}
--- /dev/null
+package org.opendaylight.controller.sal.binding.spi;
+
+import org.opendaylight.yangtools.yang.binding.BaseIdentity;
+import org.opendaylight.yangtools.yang.binding.RpcService;
+
+public interface RpcRoutingContext<C extends BaseIdentity,S extends RpcService> {
+
+ Class<C> getContextType();
+ Class<S> getServiceType();
+}
namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl";
prefix "binding-impl";
- import config { prefix config; revision-date 2013-04-05; }
- import opendaylight-md-sal-binding {prefix sal;}
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-binding {prefix sal;}
+ import opendaylight-md-sal-dom {prefix dom;}
+ import opendaylight-md-sal-common {prefix common;}
description
"Service definition for Binding Aware MD-SAL.";
"Initial revision";
}
- identity binding-broker-impl-singleton {
+ identity binding-dom-mapping-service {
+ base config:service-type;
+ config:java-class "org.opendaylight.controller.sal.binding.impl.connect.dom.BindingIndependentMappingService";
+ }
+
+
+ identity binding-broker-impl {
base config:module-type;
config:provided-service sal:binding-broker-osgi-registry;
- config:java-name-prefix BindingBrokerImplSingleton;
+ config:java-name-prefix BindingBrokerImpl;
+ }
+
+ identity binding-data-broker {
+ base config:module-type;
+ config:provided-service sal:binding-data-broker;
+ config:provided-service sal:binding-data-consumer-broker;
+ config:java-name-prefix DataBrokerImpl;
}
-
- grouping rpc-routing-table {
-
+ identity binding-rpc-broker {
+ base config:module-type;
+ config:provided-service sal:binding-rpc-registry;
+ config:java-name-prefix RpcBrokerImpl;
+ }
+
+ identity binding-notification-broker {
+ base config:module-type;
+ config:provided-service sal:binding-notification-service;
+ config:provided-service sal:binding-notification-subscription-service;
+ config:java-name-prefix NotificationBrokerImpl;
}
+ identity runtime-generated-mapping {
+ base config:module-type;
+ config:provided-service binding-dom-mapping-service;
+ config:java-name-prefix RuntimeMapping;
+ }
- grouping rpc-router {
- leaf module {
- type string;
+ augment "/config:modules/config:module/config:configuration" {
+ case binding-broker-impl {
+ when "/config:modules/config:module/config:type = 'binding-broker-impl'";
+
+ /*
+ container rpc-registry {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity sal:binding-rpc-registry;
+ }
+ }
+ }*/
+
+ container data-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity sal:binding-data-broker;
+ }
+ }
+ }
+
+ container notification-service {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity sal:binding-notification-service;
+ }
+ }
+ }
}
- container routing-tables {
- list routing-table {
- uses rpc-routing-table;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case binding-data-broker {
+ when "/config:modules/config:module/config:type = 'binding-data-broker'";
+ container dom-broker {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity dom:dom-broker-osgi-registry;
+ }
+ }
}
+ container mapping-service {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity binding-dom-mapping-service;
+ }
+ }
+ }
}
}
+
+ augment "/config:modules/config:module/config:state" {
+ case runtime-generated-mapping {
+ when "/config:modules/config:module/config:type = 'runtime-generated-mapping'";
+ }
+ }
augment "/config:modules/config:module/config:state" {
- case binding-broker-impl-singleton {
- when "/config:modules/config:module/config:type = 'binding-broker-impl-singleton'";
-
- container rpc-routers {
- list rpc-router {
- uses rpc-router;
- }
- }
+ case binding-data-broker {
+ when "/config:modules/config:module/config:type = 'binding-data-broker'";
+ uses common:data-state;
+ }
+ }
+ augment "/config:modules/config:module/config:state" {
+ case binding-rpc-broker {
+ when "/config:modules/config:module/config:type = 'binding-rpc-broker'";
+ uses common:rpc-state;
+ }
+ }
+ augment "/config:modules/config:module/config:state" {
+ case binding-notification-broker {
+ when "/config:modules/config:module/config:type = 'binding-notification-broker'";
+ uses common:notification-state;
}
}
}
\ No newline at end of file
import com.google.common.util.concurrent.MoreExecutors;
public abstract class AbstractDataServiceTest {
- protected DataBrokerService biDataService;
+ protected org.opendaylight.controller.sal.core.api.data.DataProviderService biDataService;
protected DataProviderService baDataService;
/**
mappingService = mappingServiceImpl;
File pathname = new File("target/gen-classes-debug");
//System.out.println("Generated classes are captured in " + pathname.getAbsolutePath());
- mappingServiceImpl.start();
+ mappingServiceImpl.start(null);
//mappingServiceImpl.getBinding().setClassFileCapturePath(pathname);
connectorServiceImpl = new BindingIndependentDataServiceConnector();
package org.opendaylight.controller.sal.binding.test.bugfix;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
+
+
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.OutputActionBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.PopMplsAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.DropAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.DropActionBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.PopMplsActionBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.ActionBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Instructions;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.InstructionsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActionsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._4.match.TcpMatchBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.vlan.match.fields.VlanIdBuilder;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import com.google.common.collect.FluentIterable;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
NODE_ID);
- private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder() //
- .node(Nodes.class) //
- .toInstance();
-
- private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier NODES_INSTANCE_ID_BI = //
- org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.builder() //
- .node(Nodes.QNAME) //
- .toInstance();
-
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder() //
- .node(Nodes.class) //
+ private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
.child(Node.class, NODE_KEY).toInstance();
private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier NODE_INSTANCE_ID_BI = //
.nodeWithKey(Flow.QNAME, FLOW_KEY_BI) //
.toInstance();
private static final InstanceIdentifier<? extends DataObject> FLOW_INSTANCE_ID_BA = //
- InstanceIdentifier.builder() //
- .node(Flows.class) //
- .node(Flow.class, FLOW_KEY) //
+ InstanceIdentifier.builder(Flows.class) //
+ .child(Flow.class, FLOW_KEY) //
.toInstance();
ListenableFuture<Void> task3Future = listenablePool.submit(task3);
+ @SuppressWarnings("unchecked")
ListenableFuture<List<Void>> compositeFuture = Futures.allAsList(task1Future,task2Future,task3Future);
Thread.sleep(500);
flow.setKey(FLOW_KEY);
flow.setMatch(match.build());
flow.setNode(NODE_REF);
-
-
InstructionsBuilder instructions = new InstructionsBuilder();
InstructionBuilder instruction = new InstructionBuilder();
ApplyActionsBuilder applyActions = new ApplyActionsBuilder();
actionList.add(new ActionBuilder().setAction(popMplsAction.build()).build());
applyActions.setAction(actionList );
+
instruction.setInstruction(applyActions.build());
assertNotNull(ret);
assertEquals(TransactionStatus.COMMITED, ret.getResult());
}
+
+    /**
+     * Creates a second flow (TCP layer-4 match, priority 2) through the
+     * binding-aware data service and commits it as configuration data.
+     *
+     * @throws Exception if the commit fails or is interrupted
+     */
+    private void createFlow2() throws Exception {
+        DataModificationTransaction modification = baDataService.beginTransaction();
+        long id = 123;
+        FlowKey key = new FlowKey(id, new NodeRef(NODE_INSTANCE_ID_BA));
+        FlowBuilder flow = new FlowBuilder();
+        flow.setKey(key);
+        MatchBuilder match = new MatchBuilder();
+        // Only a layer-4 (TCP) match is needed for this scenario.
+        match.setLayer4Match(new TcpMatchBuilder().build());
+        flow.setMatch(match.build());
+        flow.setPriority(2);
+        InstanceIdentifier<?> path1 = InstanceIdentifier.builder(Flows.class).child(Flow.class, key).toInstance();
+        modification.putConfigurationData(path1, flow.build());
+        // Await the commit and verify it succeeded instead of firing and
+        // forgetting the transaction (this method runs inside a concurrent
+        // task, so a silent failure would otherwise go unnoticed).
+        assertEquals(TransactionStatus.COMMITED, modification.commit().get().getResult());
+    }
private class CreateFlowTask implements Callable<Void> {
- final Object startSyncObject;
-
public CreateFlowTask(Object startSync) {
- startSyncObject = startSync;
}
@Override
//startSyncObject.wait();
//Thread.sleep(500);
createFlow();
+ createFlow2();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
    private void verifyDataAreStoredProperly() {
-        CompositeNode biFlow = biDataService.readConfigurationData(FLOW_INSTANCE_ID_BI);
+        // Read the whole flows container through the binding-independent
+        // service and drill down to the first flow entry, rather than
+        // addressing one flow by its keyed path.
+        CompositeNode biFlows = biDataService.readConfigurationData(org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.of(Flows.QNAME));
+        assertNotNull(biFlows);
+        CompositeNode biFlow = biFlows.getFirstCompositeByName(Flow.QNAME);
        assertNotNull(biFlow);
-        CompositeNode biMatch = biFlow.getFirstCompositeByName(QName.create(Flow.QNAME, Match.QNAME.getLocalName()));
-        assertNotNull(biMatch);
    }
package org.opendaylight.controller.sal.binding.test.bugfix;
-import java.util.Arrays;
import java.util.Collections;
-import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.Flows;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.vlan.match.fields.VlanIdBuilder;
-import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
import static org.junit.Assert.*;
public class DOMCodecBug02Test extends AbstractDataServiceTest {
private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
- private static final QName FLOW_ID_QNAME = QName.create(Flow.QNAME, "id");
- private static final QName FLOW_NODE_QNAME = QName.create(Flow.QNAME, "node");
-
- private static final String FLOW_ID = "foo";
private static final String NODE_ID = "node:1";
private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
NODE_ID);
- private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder() //
- .node(Nodes.class) //
+ private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
.toInstance();
private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier NODES_INSTANCE_ID_BI = //
.node(Nodes.QNAME) //
.toInstance();
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder() //
- .node(Nodes.class) //
+ private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
.child(Node.class, NODE_KEY).toInstance();
private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier NODE_INSTANCE_ID_BI = //
package org.opendaylight.controller.sal.binding.test.bugfix;
-
-import java.util.Arrays;
import java.util.Collections;
-import java.util.List;
import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
+
import org.junit.Test;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.Flows;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatchBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.vlan.match.fields.VlanIdBuilder;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
+
import static org.junit.Assert.*;
private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
NODE_ID);
- private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder() //
- .node(Nodes.class) //
+ private static final InstanceIdentifier<Nodes> NODES_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
.toInstance();
- private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier NODES_INSTANCE_ID_BI = //
- org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.builder() //
- .node(Nodes.QNAME) //
- .toInstance();
- private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder() //
- .node(Nodes.class) //
+ private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(NODES_INSTANCE_ID_BA) //
.child(Node.class, NODE_KEY).toInstance();
private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier NODE_INSTANCE_ID_BI = //
.node(Nodes.QNAME) //
.nodeWithKey(Node.QNAME, NODE_KEY_BI) //
.toInstance();
- private static final NodeRef NODE_REF = new NodeRef(NODE_INSTANCE_ID_BA);
private DataChangeEvent<InstanceIdentifier<?>, DataObject> receivedChangeEvent;
verifyNodes(nodes,original);
+
+ testAddingNodeConnector();
+
+
+
+ testNodeRemove();
+
+
+ }
+
+    /**
+     * Adds a node connector under the test node via the operational data
+     * store and verifies that it can be read back through the parent node.
+     *
+     * @throws Exception if the commit fails or is interrupted
+     */
+    private void testAddingNodeConnector() throws Exception {
+        NodeConnectorId ncId = new NodeConnectorId("openflow:1:bar");
+        // The key identifies a NodeConnector, not a Node, so name it accordingly.
+        NodeConnectorKey ncKey = new NodeConnectorKey(ncId);
+        InstanceIdentifier<NodeConnector> ncInstanceId = InstanceIdentifier.builder(NODE_INSTANCE_ID_BA)
+                .child(NodeConnector.class, ncKey).toInstance();
+        NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder();
+        ncBuilder.setId(ncId);
+        ncBuilder.setKey(ncKey);
+        NodeConnector connector = ncBuilder.build();
+
+        DataModificationTransaction transaction = baDataService.beginTransaction();
+        transaction.putOperationalData(ncInstanceId, connector);
+        RpcResult<TransactionStatus> result = transaction.commit().get();
+        assertEquals(TransactionStatus.COMMITED, result.getResult());
+
+        // The connector must be visible when reading the parent node back.
+        Node node = (Node) baDataService.readOperationalData(NODE_INSTANCE_ID_BA);
+        assertNotNull(node);
+        assertNotNull(node.getNodeConnector());
+        assertFalse(node.getNodeConnector().isEmpty());
+        NodeConnector readNc = node.getNodeConnector().get(0);
+        assertNotNull(readNc);
+        // The connector read back must carry the id that was stored.
+        assertEquals(ncId, readNc.getId());
+    }
+
+    /**
+     * Removes the test node from the operational store and checks that a
+     * subsequent read no longer returns it.
+     *
+     * @throws Exception if the commit fails or is interrupted
+     */
+    private void testNodeRemove() throws Exception {
+        DataModificationTransaction tx = baDataService.beginTransaction();
+        tx.removeOperationalData(NODE_INSTANCE_ID_BA);
+
+        RpcResult<TransactionStatus> outcome = tx.commit().get();
+        assertEquals(TransactionStatus.COMMITED, outcome.getResult());
+
+        // After a successful removal the node path must resolve to nothing.
+        Node node = (Node) baDataService.readOperationalData(NODE_INSTANCE_ID_BA);
+        assertNull(node);
+    }
private void verifyNodes(Nodes nodes,Node original) {
--- /dev/null
+package org.opendaylight.controller.sal.binding.test.bugfix;
+
+/**
+ * Placeholder for a regression test covering a NullPointerException during
+ * RPC registration; the test body has not been written yet.
+ */
+public class RpcRegistrationNullPointer {
+
+}
assertNotNull(result2.getResult());
assertEquals(TransactionStatus.COMMITED, result.getResult());
- Nodes allNodes = (Nodes) baDataService.readConfigurationData(InstanceIdentifier.builder().node(Nodes.class)
+ Nodes allNodes = (Nodes) baDataService.readConfigurationData(InstanceIdentifier.builder(Nodes.class)
.toInstance());
assertNotNull(allNodes);
assertNotNull(allNodes.getNode());
private static NodeRef createNodeRef(String string) {
NodeKey key = new NodeKey(new NodeId(string));
- InstanceIdentifier<Node> path = InstanceIdentifier.builder().node(Nodes.class).node(Node.class, key)
+ InstanceIdentifier<Node> path = InstanceIdentifier.builder(Nodes.class).child(Node.class, key)
.toInstance();
return new NodeRef(path);
}
--- /dev/null
+package org.opendaylight.controller.sal.binding.test.connect.dom;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+
+
+
+
+
+
+
+
+
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Future;
+
+
+
+
+
+
+
+
+
+
+
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
+import org.opendaylight.controller.sal.binding.impl.connect.dom.CommitHandlersTransactions;
+import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
+import org.opendaylight.controller.sal.core.api.data.DataModificationTransaction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.PopMplsActionBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.ActionBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.Flows;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.InstructionsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActionsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.vlan.match.fields.VlanIdBuilder;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+
+import com.google.common.collect.ImmutableMap;
+
+/**
+ * Regression test: a flow written through the binding-independent (DOM)
+ * broker must reach a commit handler that was registered through the
+ * binding-aware data service, with the payload translated into the
+ * generated binding classes.
+ */
+public class ChangeOriginatedInDomBrokerTest extends AbstractDataServiceTest {
+
+    private static final QName NODE_ID_QNAME = QName.create(Node.QNAME, "id");
+    private static final QName FLOW_ID_QNAME = QName.create(Flow.QNAME, "id");
+    private static final QName FLOW_NODE_QNAME = QName.create(Flow.QNAME, "node");
+    private static final long FLOW_ID = 1234;
+    private static final String NODE_ID = "node:1";
+
+    // Set from within the commit handler's requestCommit() callback and
+    // inspected afterwards by simpleModifyOperation().
+    private DataModification<InstanceIdentifier<? extends DataObject>, DataObject> modificationCapture;
+
+
+    private static final NodeKey NODE_KEY = new NodeKey(new NodeId(NODE_ID));
+
+    // Binding-independent (QName -> value) form of NODE_KEY.
+    private static final Map<QName, Object> NODE_KEY_BI = Collections.<QName, Object> singletonMap(NODE_ID_QNAME,
+            NODE_ID);
+
+    private static final InstanceIdentifier<Node> NODE_INSTANCE_ID_BA = InstanceIdentifier.builder(Nodes.class) //
+            .child(Node.class, NODE_KEY).toInstance();
+
+    private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier NODE_INSTANCE_ID_BI = //
+    org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.builder() //
+            .node(Nodes.QNAME) //
+            .nodeWithKey(Node.QNAME, NODE_KEY_BI) //
+            .toInstance();
+    private static final NodeRef NODE_REF = new NodeRef(NODE_INSTANCE_ID_BA);
+
+    private static final FlowKey FLOW_KEY = new FlowKey(FLOW_ID, NODE_REF);
+
+    private static final Map<QName, Object> FLOW_KEY_BI = //
+    ImmutableMap.<QName, Object> of(FLOW_ID_QNAME, FLOW_ID, FLOW_NODE_QNAME, NODE_INSTANCE_ID_BI);
+
+    // Binding-independent path of the flow written by the test.
+    private static final org.opendaylight.yangtools.yang.data.api.InstanceIdentifier FLOW_INSTANCE_ID_BI = //
+    org.opendaylight.yangtools.yang.data.api.InstanceIdentifier.builder() //
+            .node(Flows.QNAME) //
+            .nodeWithKey(Flow.QNAME, FLOW_KEY_BI) //
+            .toInstance();
+
+    // Binding-aware path the commit handler is registered under.
+    private static final InstanceIdentifier<Flows> FLOWS_PATH_BA = //
+    InstanceIdentifier.builder(Flows.class) //
+            .toInstance();
+
+
+    // Binding-aware path where the translated flow is expected to appear.
+    private static final InstanceIdentifier<Flow> FLOW_INSTANCE_ID_BA = //
+    InstanceIdentifier.builder(Flows.class) //
+            .child(Flow.class, FLOW_KEY) //
+            .toInstance();
+
+    @Test
+    public void simpleModifyOperation() throws Exception {
+
+        registerCommitHandler();
+
+        // Write the flow through the DOM (binding-independent) broker.
+        CompositeNode domflow = createXmlFlow();
+        DataModificationTransaction biTransaction = biDataService.beginTransaction();
+        biTransaction.putConfigurationData(FLOW_INSTANCE_ID_BI, domflow);
+        RpcResult<TransactionStatus> biResult = biTransaction.commit().get();
+
+        // The commit handler must have seen the change, exposed as
+        // binding-aware (generated) objects at the binding-aware path.
+        assertNotNull(modificationCapture);
+        Flow flow = (Flow) modificationCapture.getCreatedConfigurationData().get(FLOW_INSTANCE_ID_BA);
+        assertNotNull(flow);
+        assertNotNull(flow.getMatch());
+        assertEquals(TransactionStatus.COMMITED, biResult.getResult());
+
+    }
+
+
+
+    // Registers a commit handler for FLOWS_PATH_BA that captures the
+    // modification it is asked to commit and always reports success.
+    private void registerCommitHandler() {
+        DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject> flowTestCommitHandler = new DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>() {
+
+
+            @Override
+            public org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction<InstanceIdentifier<? extends DataObject>, DataObject> requestCommit(
+                    DataModification<InstanceIdentifier<? extends DataObject>, DataObject> modification) {
+                modificationCapture = modification;
+                return CommitHandlersTransactions.allwaysSuccessfulTransaction(modification);
+            }
+
+
+        };
+        Registration<DataCommitHandler<InstanceIdentifier<? extends DataObject>, DataObject>> registration = baDataService.registerCommitHandler(FLOWS_PATH_BA, flowTestCommitHandler);
+        assertNotNull(registration);
+    }
+
+    // Builds a flow (VLAN 10 match, pop-MPLS apply-action instruction) as a
+    // binding object and converts it to its DOM representation.
+    private CompositeNode createXmlFlow() {
+
+        FlowBuilder flow = new FlowBuilder();
+        MatchBuilder match = new MatchBuilder();
+        VlanMatchBuilder vlanBuilder = new VlanMatchBuilder();
+        VlanIdBuilder vlanIdBuilder = new VlanIdBuilder();
+        VlanId vlanId = new VlanId(10);
+        vlanBuilder.setVlanId(vlanIdBuilder.setVlanId(vlanId).build());
+        match.setVlanMatch(vlanBuilder.build());
+
+        flow.setKey(FLOW_KEY);
+        flow.setMatch(match.build());
+        flow.setNode(NODE_REF);
+        InstructionsBuilder instructions = new InstructionsBuilder();
+        InstructionBuilder instruction = new InstructionBuilder();
+        ApplyActionsBuilder applyActions = new ApplyActionsBuilder();
+        List<Action> actionList = new ArrayList<>();
+        PopMplsActionBuilder popMplsAction = new PopMplsActionBuilder();
+        popMplsAction.setEthernetType(34);
+        actionList.add(new ActionBuilder().setAction(popMplsAction.build()).build());
+
+        applyActions.setAction(actionList );
+
+
+
+        instruction.setInstruction(applyActions.build());
+
+
+        List<Instruction> instructionList = Collections.<Instruction>singletonList(instruction.build());
+        instructions.setInstruction(instructionList );
+
+        flow.setInstructions(instructions.build());
+
+        // Translate the binding object into a DOM CompositeNode so it can be
+        // written through the binding-independent broker.
+        CompositeNode domFlow = mappingService.toDataDom(flow.build());
+        return domFlow;
+    }
+}
config:java-class "org.opendaylight.controller.sal.binding.api.data.DataProviderService";
}
+ identity binding-data-consumer-broker {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.sal.binding.api.data.DataBrokerService";
+ }
+
identity binding-rpc-registry {
base "config:service-type";
config:java-class "org.opendaylight.controller.sal.binding.api.RpcProviderRegistry";
config:java-class "org.opendaylight.controller.sal.binding.api.NotificationProviderService";
}
+ identity binding-notification-subscription-service {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.sal.binding.api.NotificationService";
+ }
+
}
\ No newline at end of file
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <artifactId>sal-parent</artifactId>
- <groupId>org.opendaylight.controller</groupId>
- <version>1.0-SNAPSHOT</version>
- </parent>
- <artifactId>sal-binding-it</artifactId>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
- </scm>
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <artifactId>sal-parent</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <version>1.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>sal-binding-it</artifactId>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ </scm>
- <properties>
- <exam.version>3.0.0</exam.version>
- <url.version>1.5.0</url.version>
- <!-- Sonar jacoco plugin to get integration test coverage info -->
- <sonar.jacoco.reportPath>../sal-binding-broker/target/jacoco.exec</sonar.jacoco.reportPath>
- <sonar.jacoco.itReportPath>../sal-binding-broker/target/jacoco-it.exec</sonar.jacoco.itReportPath>
- </properties>
+ <properties>
+ <exam.version>3.0.0</exam.version>
+ <url.version>1.5.0</url.version>
+ <!-- Sonar jacoco plugin to get integration test coverage info -->
+ <sonar.jacoco.reportPath>../sal-binding-broker/target/jacoco.exec</sonar.jacoco.reportPath>
+ <sonar.jacoco.itReportPath>../sal-binding-broker/target/jacoco-it.exec</sonar.jacoco.itReportPath>
+ <netconf.version>0.2.3-SNAPSHOT</netconf.version>
+ <config.version>0.2.3-SNAPSHOT</config.version>
+ </properties>
- <build>
- <plugins>
- <plugin>
- <groupId>org.ops4j.pax.exam</groupId>
- <artifactId>maven-paxexam-plugin</artifactId>
- <version>1.2.4</version>
- <executions>
- <execution>
- <id>generate-config</id>
- <goals>
- <goal>generate-depends-file</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.jacoco</groupId>
- <artifactId>jacoco-maven-plugin</artifactId>
- <configuration>
- <includes>org.opendaylight.controller.*</includes>
- </configuration>
- <executions>
- <execution>
- <id>pre-test</id>
- <goals>
- <goal>prepare-agent</goal>
- </goals>
- </execution>
- <execution>
- <id>post-test</id>
- <phase>test</phase>
- <goals>
- <goal>report</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- </plugins>
- <pluginManagement>
- <plugins>
- <!--This plugin's configuration is used to store Eclipse m2e settings
- only. It has no influence on the Maven build itself. -->
- <plugin>
- <groupId>org.eclipse.m2e</groupId>
- <artifactId>lifecycle-mapping</artifactId>
- <version>1.0.0</version>
- <configuration>
- <lifecycleMappingMetadata>
- <pluginExecutions>
- <pluginExecution>
- <pluginExecutionFilter>
- <groupId>
- org.ops4j.pax.exam
- </groupId>
- <artifactId>
- maven-paxexam-plugin
- </artifactId>
- <versionRange>
- [1.2.4,)
- </versionRange>
- <goals>
- <goal>
- generate-depends-file
- </goal>
- </goals>
- </pluginExecutionFilter>
- <action>
- <ignore></ignore>
- </action>
- </pluginExecution>
- </pluginExecutions>
- </lifecycleMappingMetadata>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.jacoco</groupId>
- <artifactId>jacoco-maven-plugin</artifactId>
- <version>${jacoco.version}</version>
- <configuration>
- <destFile>../sal-binding-broker/target/jacoco-it.exec</destFile>
- <includes>org.opendaylight.controller.*</includes>
- </configuration>
- <executions>
- <execution>
- <id>pre-test</id>
- <goals>
- <goal>prepare-agent</goal>
- </goals>
- </execution>
- <execution>
- <id>post-test</id>
- <configuration>
- <skip>true</skip>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </pluginManagement>
- </build>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.ops4j.pax.exam</groupId>
+ <artifactId>maven-paxexam-plugin</artifactId>
+ <version>1.2.4</version>
+ <executions>
+ <execution>
+ <id>generate-config</id>
+ <goals>
+ <goal>generate-depends-file</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <configuration>
+ <includes>org.opendaylight.controller.*</includes>
+ </configuration>
+ <executions>
+ <execution>
+ <id>pre-test</id>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>post-test</id>
+ <phase>test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ <pluginManagement>
+ <plugins>
+ <!--This plugin's configuration is used to store Eclipse
+ m2e settings only. It has no influence on the Maven build itself. -->
+ <plugin>
+ <groupId>org.eclipse.m2e</groupId>
+ <artifactId>lifecycle-mapping</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <lifecycleMappingMetadata>
+ <pluginExecutions>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>
+ org.ops4j.pax.exam
+ </groupId>
+ <artifactId>
+ maven-paxexam-plugin
+ </artifactId>
+ <versionRange>
+ [1.2.4,)
+ </versionRange>
+ <goals>
+ <goal>
+ generate-depends-file
+ </goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <ignore></ignore>
+ </action>
+ </pluginExecution>
+ </pluginExecutions>
+ </lifecycleMappingMetadata>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <version>${jacoco.version}</version>
+ <configuration>
+ <destFile>../sal-binding-broker/target/jacoco-it.exec</destFile>
+ <includes>org.opendaylight.controller.*</includes>
+ </configuration>
+ <executions>
+ <execution>
+ <id>pre-test</id>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>post-test</id>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
- <dependencies>
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>xtend-lib-osgi</artifactId>
- <version>2.4.3</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-broker-impl</artifactId>
- <version>1.0-SNAPSHOT</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.ops4j.pax.exam</groupId>
- <artifactId>pax-exam-container-native</artifactId>
- <version>${exam.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.ops4j.pax.exam</groupId>
- <artifactId>pax-exam-junit4</artifactId>
- <version>${exam.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.ops4j.pax.exam</groupId>
- <artifactId>pax-exam</artifactId>
- <version>${exam.version}</version>
- <!-- Compile scope here is intentional, it is used
- in TestHelper class which could be downloaded
- via nexus and reused in other integration tests.
- -->
- <scope>compile</scope>
- </dependency>
- <dependency>
- <groupId>org.ops4j.pax.exam</groupId>
- <artifactId>pax-exam-link-mvn</artifactId>
- <version>${exam.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.osgi</artifactId>
- <version>3.8.1.v20120830-144521</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>log4j-over-slf4j</artifactId>
- <version>1.7.2</version>
- </dependency>
- <dependency>
- <groupId>ch.qos.logback</groupId>
- <artifactId>logback-core</artifactId>
- <version>1.0.9</version>
- </dependency>
- <dependency>
- <groupId>ch.qos.logback</groupId>
- <artifactId>logback-classic</artifactId>
- <version>1.0.9</version>
- </dependency>
- <dependency>
- <groupId>org.mockito</groupId>
- <artifactId>mockito-all</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-service</artifactId>
- <version>1.0-SNAPSHOT</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-manager</artifactId>
- <version>0.2.3-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-flow-management</artifactId>
- <version>1.0-SNAPSHOT</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.thirdparty</groupId>
+ <artifactId>xtend-lib-osgi</artifactId>
+ <version>2.4.3</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-broker-impl</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.ops4j.pax.exam</groupId>
+ <artifactId>pax-exam-container-native</artifactId>
+ <version>${exam.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.ops4j.pax.exam</groupId>
+ <artifactId>pax-exam-junit4</artifactId>
+ <version>${exam.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-netconf-connector</artifactId>
+ <version>${netconf.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-store-impl</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>logback-config</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-impl</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-file-adapter</artifactId>
+ <version>${config.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-impl</artifactId>
+ <version>${netconf.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-client</artifactId>
+ <version>${netconf.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.ops4j.pax.exam</groupId>
+ <artifactId>pax-exam</artifactId>
+ <version>${exam.version}</version>
+ <!-- Compile scope here is intentional; it is used in the TestHelper
+ class, which can be downloaded via Nexus and reused in other integration
+ tests. -->
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.ops4j.pax.exam</groupId>
+ <artifactId>pax-exam-link-mvn</artifactId>
+ <version>${exam.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>equinoxSDK381</groupId>
+ <artifactId>org.eclipse.osgi</artifactId>
+ <version>3.8.1.v20120830-144521</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>log4j-over-slf4j</artifactId>
+ <version>1.7.2</version>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-core</artifactId>
+ <version>1.0.9</version>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-classic</artifactId>
+ <version>1.0.9</version>
+ </dependency>
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-service</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-manager</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.model</groupId>
+ <artifactId>model-flow-management</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
<groupId>org.opendaylight.yangtools.thirdparty</groupId>
<artifactId>antlr4-runtime-osgi-nohead</artifactId>
<version>4.0</version>
- </dependency>
- </dependencies>
+ </dependency>
+ </dependencies>
</project>
import static org.ops4j.pax.exam.CoreOptions.junitBundles;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
import static org.ops4j.pax.exam.CoreOptions.repository;
+import static org.ops4j.pax.exam.CoreOptions.repositories;
+import static org.ops4j.pax.exam.CoreOptions.systemProperty;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.options.DefaultCompositeOption;
+import org.ops4j.pax.exam.util.PathUtils;
public class TestHelper {
public static final String CONTROLLER_MODELS = "org.opendaylight.controller.model";
public static final String YANGTOOLS_MODELS = "org.opendaylight.yangtools.model";
+ private static final String OPENDAYLIGHT_SNAPSHOT = "http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/";
+ private static final String OPENDAYLIGHT_RELEASE = "http://nexus.opendaylight.org/content/repositories/opendaylight.release/";
public static Option mdSalCoreBundles() {
return new DefaultCompositeOption( //
- mavenBundle(YANGTOOLS, "concepts").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "yang-binding").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "yang-common").versionAsInProject(), //
- mavenBundle(CONTROLLER, "sal-common").versionAsInProject(), //
- mavenBundle(CONTROLLER, "sal-common-api").versionAsInProject(), //
- mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(), //
-
- mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(),
- mavenBundle("com.google.guava", "guava").versionAsInProject(), //
+ mavenBundle(YANGTOOLS, "concepts").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "yang-binding").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "yang-common").versionAsInProject(), // //
+ mavenBundle(CONTROLLER, "sal-common").versionAsInProject(), // //
+ mavenBundle(CONTROLLER, "sal-common-api").versionAsInProject(), // //
+ mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(), // //
+
+ mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(), //
+ mavenBundle("com.google.guava", "guava").versionAsInProject(), // //
mavenBundle(YANGTOOLS + ".thirdparty", "xtend-lib-osgi").versionAsInProject() //
);
}
public static Option configMinumumBundles() {
return new DefaultCompositeOption(
- mavenBundle(CONTROLLER, "config-api").versionAsInProject(), //
- mavenBundle(CONTROLLER, "config-manager").versionAsInProject(), //
- mavenBundle("commons-io", "commons-io").versionAsInProject()
- );
+ mavenBundle("org.opendaylight.bgpcep", "framework").versionAsInProject(), //
+ mavenBundle("org.opendaylight.bgpcep", "util").versionAsInProject(), //
+ mavenBundle("commons-codec", "commons-codec").versionAsInProject(),
+
+ mavenBundle(CONTROLLER, "config-api").versionAsInProject(), // //
+ mavenBundle(CONTROLLER, "config-manager").versionAsInProject(), // //
+ mavenBundle("commons-io", "commons-io").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "config-api").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "config-manager").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "config-util").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "yang-jmx-generator").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "yang-store-api").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "yang-store-impl").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "logback-config").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "config-persister-api").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "netconf-api").versionAsInProject(), //
+
+ mavenBundle(CONTROLLER, "netconf-client").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "netconf-util").versionAsInProject(), //
+ mavenBundle(CONTROLLER + ".thirdparty", "ganymed", "1.0-SNAPSHOT"), //
+ mavenBundle(CONTROLLER, "netconf-mapping-api").versionAsInProject(), //
+
+ mavenBundle(CONTROLLER, "config-persister-impl").versionAsInProject(), //
+
+ mavenBundle("io.netty", "netty-handler").versionAsInProject(), //
+ mavenBundle("io.netty", "netty-codec").versionAsInProject(), //
+ mavenBundle("io.netty", "netty-buffer").versionAsInProject(), //
+ mavenBundle("io.netty", "netty-transport").versionAsInProject(), //
+ mavenBundle("io.netty", "netty-common").versionAsInProject(), //
+
+ mavenBundle("org.opendaylight.controller.thirdparty", "exificient", "0.9.2-SNAPSHOT"), //
+
+ mavenBundle("org.apache.servicemix.bundles", "org.apache.servicemix.bundles.xerces", "2.11.0_1"),
+ mavenBundle("org.eclipse.birt.runtime.3_7_1", "org.apache.xml.resolver", "1.2.0"),
+
+ mavenBundle(CONTROLLER, "config-netconf-connector").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "netconf-impl").versionAsInProject(), //
+
+ mavenBundle(CONTROLLER, "config-persister-file-adapter").versionAsInProject().noStart());
+
}
-
+
public static Option bindingAwareSalBundles() {
return new DefaultCompositeOption( //
- mavenBundle(CONTROLLER, "sal-binding-api").versionAsInProject(), //
- mavenBundle(CONTROLLER, "sal-binding-config").versionAsInProject(),
- mavenBundle(CONTROLLER, "sal-binding-broker-impl").versionAsInProject(), //
- mavenBundle("org.javassist", "javassist").versionAsInProject(), //
- mavenBundle(CONTROLLER, "sal-common-util").versionAsInProject(), //
-
- mavenBundle(YANGTOOLS, "yang-data-api").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "yang-data-impl").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "yang-model-api").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "yang-model-util").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "yang-parser-api").versionAsInProject(),
- mavenBundle(YANGTOOLS, "yang-parser-impl").versionAsInProject(),
-
-
- mavenBundle(YANGTOOLS, "binding-generator-spi").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "binding-model-api").versionAsInProject(), //
- mavenBundle(YANGTOOLS, "binding-generator-util").versionAsInProject(),
+ mavenBundle(CONTROLLER, "sal-binding-api").versionAsInProject(), // //
+ mavenBundle(CONTROLLER, "sal-binding-config").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "sal-binding-broker-impl").versionAsInProject(), // //
+ mavenBundle("org.javassist", "javassist").versionAsInProject(), // //
+ mavenBundle(CONTROLLER, "sal-common-util").versionAsInProject(), // //
+
+ mavenBundle(YANGTOOLS, "yang-data-api").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "yang-data-impl").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "yang-model-api").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "yang-model-util").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "yang-parser-api").versionAsInProject(), //
mavenBundle(YANGTOOLS, "yang-parser-impl").versionAsInProject(),
- mavenBundle(YANGTOOLS, "binding-type-provider").versionAsInProject(),
- mavenBundle(YANGTOOLS, "binding-generator-api").versionAsInProject(),
- mavenBundle(YANGTOOLS, "binding-generator-spi").versionAsInProject(),
+
+ mavenBundle(YANGTOOLS, "binding-generator-spi").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "binding-model-api").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS, "binding-generator-util").versionAsInProject(), //
+ mavenBundle(YANGTOOLS, "yang-parser-impl").versionAsInProject(), //
+ mavenBundle(YANGTOOLS, "binding-type-provider").versionAsInProject(), //
+ mavenBundle(YANGTOOLS, "binding-generator-api").versionAsInProject(), mavenBundle(YANGTOOLS,
+ "binding-generator-spi").versionAsInProject(), //
mavenBundle(YANGTOOLS, "binding-generator-impl").versionAsInProject(),
-
-
+
mavenBundle(CONTROLLER, "sal-core-api").versionAsInProject().update(), //
- mavenBundle(CONTROLLER, "sal-broker-impl").versionAsInProject(), //
+ mavenBundle(CONTROLLER, "sal-broker-impl").versionAsInProject(), // //
mavenBundle(CONTROLLER, "sal-core-spi").versionAsInProject().update(), //
-
- mavenBundle(YANGTOOLS + ".thirdparty", "antlr4-runtime-osgi-nohead").versionAsInProject() //
- );
+
+ mavenBundle(YANGTOOLS + ".thirdparty", "antlr4-runtime-osgi-nohead").versionAsInProject(), // //
+
+ systemProperty("netconf.tcp.address").value("0.0.0.0"), //
+ systemProperty("netconf.tcp.port").value("18383"), //
+ systemProperty("netconf.config.persister.storageAdapterClass").value(
+ "org.opendaylight.controller.config.persist.storage.file.FileStorageAdapter"), //
+ systemProperty("fileStorage").value(PathUtils.getBaseDir() + "/src/test/resources/controller.config"), //
+ systemProperty("numberOfBackups").value("1") //
+ //systemProperty("yangstore.blacklist").value(".*controller.model.*") //
+
+ );
}
public static Option flowCapableModelBundles() {
return new DefaultCompositeOption( //
- mavenBundle(CONTROLLER_MODELS, "model-flow-base").versionAsInProject(), //
- mavenBundle(CONTROLLER_MODELS, "model-flow-service").versionAsInProject(), //
+ mavenBundle(CONTROLLER_MODELS, "model-flow-base").versionAsInProject(), // //
+ mavenBundle(CONTROLLER_MODELS, "model-flow-service").versionAsInProject(), // //
mavenBundle(CONTROLLER_MODELS, "model-inventory").versionAsInProject() //
);
public static Option baseModelBundles() {
return new DefaultCompositeOption( //
- mavenBundle(YANGTOOLS_MODELS, "yang-ext").versionAsInProject(), //
- mavenBundle(YANGTOOLS_MODELS, "ietf-inet-types").versionAsInProject(), //
- mavenBundle(YANGTOOLS_MODELS, "ietf-yang-types").versionAsInProject(), //
- mavenBundle(YANGTOOLS_MODELS, "opendaylight-l2-types").versionAsInProject(), //
+ mavenBundle(YANGTOOLS_MODELS, "yang-ext").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS_MODELS, "ietf-inet-types").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS_MODELS, "ietf-yang-types").versionAsInProject(), // //
+ mavenBundle(YANGTOOLS_MODELS, "opendaylight-l2-types").versionAsInProject(), // //
mavenBundle(CONTROLLER_MODELS, "model-inventory").versionAsInProject());
}
public static Option junitAndMockitoBundles() {
return new DefaultCompositeOption(
- // Repository required to load harmcrest (OSGi-fied version).
- repository("http://repository.springsource.com/maven/bundles/external").id(
- "com.springsource.repository.bundles.external"),
-
- // Mockito
- mavenBundle("org.mockito", "mockito-all", "1.9.5"),
- junitBundles(),
+ // Repository required to load hamcrest (OSGi-fied version).
+ // Mockito
+ mavenBundle("org.mockito", "mockito-all", "1.9.5"), junitBundles(),
/*
* Felix has implicit boot delegation enabled by default. It
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.junit.PaxExam;
import org.ops4j.pax.exam.options.DefaultCompositeOption;
+import org.ops4j.pax.exam.util.Filter;
import org.ops4j.pax.exam.util.PathUtils;
import org.osgi.framework.BundleContext;
public static final String YANGTOOLS_MODELS = "org.opendaylight.yangtools.model";
@Inject
+ @Filter(timeout=60*1000)
BindingAwareBroker broker;
-
+
@Inject
BundleContext bundleContext;
mavenBundle("org.slf4j", "log4j-over-slf4j").versionAsInProject(), //
mavenBundle("ch.qos.logback", "logback-core").versionAsInProject(), //
mavenBundle("ch.qos.logback", "logback-classic").versionAsInProject(), //
+ systemProperty("osgi.bundles.defaultStartLevel").value("4"),
- configMinumumBundles(),
+
mdSalCoreBundles(),
bindingAwareSalBundles(),
-
+ configMinumumBundles(),
// BASE Models
baseModelBundles(), flowCapableModelBundles(), junitAndMockitoBundles());
}
--- /dev/null
+//START OF CONFIG-LAST
+<data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+<modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:schema-service-singleton</type>
+ <name>yang-schema-service</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:hash-map-data-store</type>
+ <name>hash-map-data-store</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-broker-impl</type>
+ <name>dom-broker</name>
+ <data-store xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
+ <name>ref_hash-map-data-store</name>
+ </data-store>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-broker-impl</type>
+ <name>binding-broker-impl</name>
+ <notification-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
+ <name>ref_binding-notification-broker</name>
+ </notification-service>
+ <data-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
+ <name>ref_binding-data-broker</name>
+ </data-broker>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:runtime-generated-mapping</type>
+ <name>runtime-mapping-singleton</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-notification-broker</type>
+ <name>binding-notification-broker</name>
+ </module>
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-data-broker</type>
+ <name>binding-data-broker</name>
+ <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
+ <name>ref_dom-broker</name>
+ </dom-broker>
+ <mapping-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
+ <name>ref_runtime-mapping-singleton</name>
+ </mapping-service>
+ </module>
+</modules>
+<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
+ <instance>
+ <name>ref_yang-schema-service</name>
+ <provider>/config/modules/module[name='schema-service-singleton']/instance[name='yang-schema-service']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
+ <instance>
+ <name>ref_binding-notification-broker</name>
+ <provider>/config/modules/module[name='binding-notification-broker']/instance[name='binding-notification-broker']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
+ <instance>
+ <name>ref_hash-map-data-store</name>
+ <provider>/config/modules/module[name='hash-map-data-store']/instance[name='hash-map-data-store']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-broker-osgi-registry</type>
+ <instance>
+ <name>ref_binding-broker-impl</name>
+ <provider>/config/modules/module[name='binding-broker-impl']/instance[name='binding-broker-impl']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-dom-mapping-service</type>
+ <instance>
+ <name>ref_runtime-mapping-singleton</name>
+ <provider>/config/modules/module[name='runtime-generated-mapping']/instance[name='runtime-mapping-singleton']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
+ <instance>
+ <name>ref_dom-broker</name>
+ <provider>/config/modules/module[name='dom-broker-impl']/instance[name='dom-broker']</provider>
+ </instance>
+ </service>
+ <service>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
+ <instance>
+ <name>ref_binding-data-broker</name>
+ <provider>/config/modules/module[name='binding-data-broker']/instance[name='binding-data-broker']</provider>
+ </instance>
+ </service>
+</services>
+</data>
+
+
+//END OF SNAPSHOT
+urn:opendaylight:l2:types?module=opendaylight-l2-types&revision=2013-08-27
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&revision=2013-10-28
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom?module=opendaylight-md-sal-dom&revision=2013-10-28
+urn:opendaylight:params:xml:ns:yang:controller:config?module=config&revision=2013-04-05
+urn:ietf:params:netconf:capability:candidate:1.0
+urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring?module=ietf-netconf-monitoring&revision=2010-10-04
+urn:ietf:params:xml:ns:yang:rpc-context?module=rpc-context&revision=2013-06-17
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl?module=opendaylight-sal-binding-broker-impl&revision=2013-10-28
+urn:ietf:params:xml:ns:yang:ietf-inet-types?module=ietf-inet-types&revision=2010-09-24
+urn:ietf:params:netconf:capability:rollback-on-error:1.0
+urn:ietf:params:xml:ns:yang:ietf-yang-types?module=ietf-yang-types&revision=2010-09-24
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl?module=opendaylight-sal-dom-broker-impl&revision=2013-10-28
+urn:opendaylight:params:xml:ns:yang:controller:logback:config?module=config-logging&revision=2013-07-16
+urn:opendaylight:yang:extension:yang-ext?module=yang-ext&revision=2013-07-09
+urn:opendaylight:params:xml:ns:yang:controller:md:sal:common?module=opendaylight-md-sal-common&revision=2013-10-28
+//END OF CONFIG
</encoder>
</appender>
- <root level="error">
+
+ <logger name="org.opendaylight.yangtools.yang.parser.util.ModuleDependencySort" level="ERROR"/>
+
+ <root level="info">
<appender-ref ref="STDOUT" />
</root>
</configuration>
--- /dev/null
+package org.opendaylight.controller.md.sal.common.api;
+
+import java.util.EventListener;
+
+import org.opendaylight.yangtools.concepts.Registration;
+
+public interface RegistrationListener<T extends Registration<?>> extends EventListener {
+
+ void onRegister(T registration);
+
+ void onUnregister(T registration);
+}
--- /dev/null
+package org.opendaylight.controller.md.sal.common.api.data;
+
+import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.concepts.Registration;
+
+public interface DataCommitHandlerRegistration<P extends Path<P>,D> extends Registration<DataCommitHandler<P, D>>{
+
+ P getPath();
+}
*/
package org.opendaylight.controller.md.sal.common.api.data;
-// FIXME: After 0.6 Release of YANGTools refactor to use Path marker interface for arguments.
-// import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.controller.md.sal.common.api.RegistrationListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Path;
import org.opendaylight.yangtools.concepts.Registration;
-public interface DataProvisionService<P/* extends Path<P> */, D> {
+public interface DataProvisionService<P extends Path<P> , D> {
public Registration<DataCommitHandler<P, D>> registerCommitHandler(P path, DataCommitHandler<P, D> commitHandler);
+
+ public ListenerRegistration<RegistrationListener<DataCommitHandlerRegistration<P, D>>>
+ registerCommitHandlerListener(RegistrationListener<DataCommitHandlerRegistration<P, D>> commitHandlerListener);
}
if (operationalOriginal.containsKey(path)) {
return true;
}
- D data = reader.readConfigurationData(path);
+ D data = reader.readOperationalData(path);
if (data != null) {
operationalOriginal.putIfAbsent(path, data);
return true;
import com.google.common.collect.FluentIterable;
import java.util.Set
import com.google.common.collect.ImmutableList
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandlerRegistration
+import org.opendaylight.controller.md.sal.common.api.RegistrationListener
+import org.opendaylight.yangtools.concepts.util.ListenerRegistry
+import java.util.concurrent.atomic.AtomicLong
abstract class AbstractDataBroker<P extends Path<P>, D, DCL extends DataChangeListener<P, D>> implements DataModificationTransactionFactory<P, D>, //
DataReader<P, D>, //
DataChangePublisher<P, D, DCL>, //
DataProvisionService<P, D> {
+ private static val LOG = LoggerFactory.getLogger(AbstractDataBroker);
+
@Property
var ExecutorService executor;
var AbstractDataReadRouter<P, D> dataReadRouter;
Multimap<P, DataChangeListenerRegistration<P, D, DCL>> listeners = HashMultimap.create();
- Multimap<P, DataCommitHandlerRegistration<P, D>> commitHandlers = HashMultimap.create();
-
+ Multimap<P, DataCommitHandlerRegistrationImpl<P, D>> commitHandlers = HashMultimap.create();
+
+ val ListenerRegistry<RegistrationListener<DataCommitHandlerRegistration<P,D>>> commitHandlerRegistrationListeners = new ListenerRegistry();
public new() {
}
protected def /*Iterator<Entry<Collection<DataChangeListenerRegistration<P,D,DCL>>,D>>*/ affectedCommitHandlers(
HashSet<P> paths) {
- return FluentIterable.from(commitHandlers.asMap.entrySet)
- .filter[key.isAffectedBy(paths)] //
- .transformAndConcat [value] //
- .transform[instance].toList()
+ return FluentIterable.from(commitHandlers.asMap.entrySet).filter[key.isAffectedBy(paths)] //
+ .transformAndConcat[value] //
+ .transform[instance].toList()
}
override final readConfigurationData(P path) {
}
override final registerCommitHandler(P path, DataCommitHandler<P, D> commitHandler) {
- val registration = new DataCommitHandlerRegistration(path, commitHandler, this);
+ val registration = new DataCommitHandlerRegistrationImpl(path, commitHandler, this);
commitHandlers.put(path, registration)
+ LOG.info("Registering Commit Handler {} for path: {}",commitHandler,path);
+ for(listener : commitHandlerRegistrationListeners) {
+ try {
+ listener.instance.onRegister(registration);
+ } catch (Exception e) {
+ LOG.error("Unexpected exception in listener {} during invoking onRegister",listener.instance,e);
+ }
+ }
return registration;
}
return new CompositeObjectRegistration(reader, Arrays.asList(confReg, dataReg));
}
+
+ override registerCommitHandlerListener(RegistrationListener<DataCommitHandlerRegistration<P, D>> commitHandlerListener) {
+ val ret = commitHandlerRegistrationListeners.register(commitHandlerListener);
+
+ return ret;
+ }
+
protected final def removeListener(DataChangeListenerRegistration<P, D, DCL> registration) {
listeners.remove(registration.path, registration);
}
- protected final def removeCommitHandler(DataCommitHandlerRegistration<P, D> registration) {
+ protected final def removeCommitHandler(DataCommitHandlerRegistrationImpl<P, D> registration) {
commitHandlers.remove(registration.path, registration);
+
+ LOG.info("Removing Commit Handler {} for path: {}",registration.instance,registration.path);
+ for(listener : commitHandlerRegistrationListeners) {
+ try {
+ listener.instance.onUnregister(registration);
+ } catch (Exception e) {
+ LOG.error("Unexpected exception in listener {} during invoking onUnregister",listener.instance,e);
+ }
+ }
}
protected final def getActiveCommitHandlers() {
}
@Data
-package class ListenerStateCapture<P extends Path<P>, D,DCL extends DataChangeListener<P, D>> {
+package class ListenerStateCapture<P extends Path<P>, D, DCL extends DataChangeListener<P, D>> {
@Property
P path;
}
-package class DataCommitHandlerRegistration<P extends Path<P>, D> extends AbstractObjectRegistration<DataCommitHandler<P, D>> {
+package class DataCommitHandlerRegistrationImpl<P extends Path<P>, D> //
+extends AbstractObjectRegistration<DataCommitHandler<P, D>> //
+implements DataCommitHandlerRegistration<P, D> {
AbstractDataBroker<P, D, ?> dataBroker;
dataBroker.removeCommitHandler(this);
dataBroker = null;
}
-
}
-package class TwoPhaseCommit<P extends Path<P>, D,DCL extends DataChangeListener<P, D>> implements Callable<RpcResult<TransactionStatus>> {
+package class TwoPhaseCommit<P extends Path<P>, D, DCL extends DataChangeListener<P, D>> implements Callable<RpcResult<TransactionStatus>> {
private static val log = LoggerFactory.getLogger(TwoPhaseCommit);
val listeners = dataBroker.affectedListenersWithInitialState(affectedPaths);
+ val transactionId = transaction.identifier;
+
+ log.info("Transaction: {} Started.",transactionId);
// requesting commits
- val Iterable<DataCommitHandler<P, D>> commitHandlers = dataBroker.affectedCommitHandlers(affectedPaths);
+ val Iterable<DataCommitHandler<P, D>> commitHandlers = dataBroker.affectedCommitHandlers(affectedPaths);
val List<DataCommitTransaction<P, D>> handlerTransactions = new ArrayList();
try {
for (handler : commitHandlers) {
handlerTransactions.add(handler.requestCommit(transaction));
}
} catch (Exception e) {
- log.error("Request Commit failded", e);
+ log.error("Transaction: {} Request Commit failed", transactionId,e);
return rollback(handlerTransactions, e);
}
val List<RpcResult<Void>> results = new ArrayList();
}
listeners.publishDataChangeEvent();
} catch (Exception e) {
- log.error("Finish Commit failed", e);
+ log.error("Transaction: {} Finish Commit failed",transactionId, e);
return rollback(handlerTransactions, e);
}
-
-
+ log.info("Transaction: {} Finished successfully.",transactionId);
return Rpcs.getRpcResult(true, TransactionStatus.COMMITED, Collections.emptySet());
}
-
- def void publishDataChangeEvent(ImmutableList<ListenerStateCapture<P, D,DCL>> listeners) {
- for(listenerSet : listeners) {
+
+ def void publishDataChangeEvent(ImmutableList<ListenerStateCapture<P, D, DCL>> listeners) {
+ for (listenerSet : listeners) {
val updatedConfiguration = dataBroker.readConfigurationData(listenerSet.path);
val updatedOperational = dataBroker.readOperationalData(listenerSet.path);
-
- val changeEvent = new DataChangeEventImpl(transaction,listenerSet.initialConfigurationState,listenerSet.initialOperationalState,updatedOperational,updatedConfiguration);
- for(listener : listenerSet.listeners) {
+
+ val changeEvent = new DataChangeEventImpl(transaction, listenerSet.initialConfigurationState,
+ listenerSet.initialOperationalState, updatedOperational, updatedConfiguration);
+ for (listener : listenerSet.listeners) {
try {
listener.instance.onDataChanged(changeEvent);
-
+
} catch (Exception e) {
e.printStackTrace();
}
return Rpcs.getRpcResult(false, TransactionStatus.FAILED, Collections.emptySet());
}
}
+
public abstract class AbstractDataTransaction<P extends Path<P>, D> extends AbstractDataModification<P, D> {
@Property
var AbstractDataBroker<P, D, ?> broker;
- protected new(AbstractDataBroker<P, D, ?> dataBroker) {
+ protected new(Object identifier,AbstractDataBroker<P, D, ?> dataBroker) {
super(dataBroker);
- _identifier = new Object();
+ _identifier = identifier;
broker = dataBroker;
status = TransactionStatus.NEW;
</parent>
<artifactId>sal-core-api</artifactId>
<scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
- <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
</scm>
-
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ </plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>0.5.9-SNAPSHOT</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>
+ org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ </codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>
+ urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang
+ </namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.8</version>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/config</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<version>1.0-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-data-api</artifactId>
- <version>0.5.9-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
--- /dev/null
+package org.opendaylight.controller.sal.core.api.data;
+
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
+import org.opendaylight.controller.md.sal.common.api.data.DataReader;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
+
+public interface DataStore extends //
+ DataReader<InstanceIdentifier, CompositeNode>,
+ DataCommitHandler<InstanceIdentifier, CompositeNode> {
+
+}
--- /dev/null
+module opendaylight-md-sal-common {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:common";
+ prefix "md-sal-common";
+
+ description
+ "Common definition for MD-SAL.";
+
+ revision "2013-10-28" {
+ description
+ "Initial revision";
+ }
+
+ grouping rpc-routing-table {
+
+ leaf routing-context {
+ type string;
+ }
+ list routes {
+ leaf path {
+ type string;
+ }
+ leaf destination {
+ type string;
+ }
+ }
+
+ }
+
+ grouping rpc-router {
+ leaf module {
+ type string;
+ }
+ container routing-tables {
+ list routing-table {
+ uses rpc-routing-table;
+ }
+ }
+ }
+
+ grouping rpc-state {
+ list rpc-router {
+ uses rpc-router;
+ }
+ }
+
+ grouping notification-state {
+ container notifications {
+ leaf published {
+ type uint32;
+ }
+ }
+ }
+
+ grouping data-state {
+ container transactions {
+ leaf created {
+ type uint32;
+ }
+ leaf successful {
+ type uint32;
+ }
+ leaf failed {
+ type uint32;
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+module opendaylight-md-sal-dom {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom";
+ prefix "md-sal-dom";
+
+ import config { prefix config; revision-date 2013-04-05; }
+
+ description
+ "Service definition for DOM MD-SAL.";
+
+ revision "2013-10-28" {
+ description
+ "Initial revision";
+ }
+
+ identity dom-broker-osgi-registry {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.sal.core.api.Broker";
+ }
+
+ identity dom-data-broker {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.sal.core.api.data.DataProviderService";
+ }
+
+ identity dom-data-store {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.sal.core.api.data.DataStore";
+ }
+
+ identity schema-service {
+ base "config:service-type";
+ config:java-class "org.opendaylight.controller.sal.core.api.model.SchemaService";
+ }
+}
\ No newline at end of file
<build>
<plugins>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <version>0.5.9-SNAPSHOT</version>
+ <executions>
+ <execution>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ <configuration>
+ <codeGenerators>
+ <generator>
+ <codeGeneratorClass>
+ org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ </codeGeneratorClass>
+ <outputBaseDir>${project.build.directory}/generated-sources/config</outputBaseDir>
+ <additionalConfiguration>
+ <namespaceToPackage1>
+ urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang
+ </namespaceToPackage1>
+ </additionalConfiguration>
+ </generator>
+ </codeGenerators>
+ <inspectDependencies>true</inspectDependencies>
+ </configuration>
+ </execution>
+ </executions>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator-plugin</artifactId>
+ <version>0.2.3-SNAPSHOT</version>
+ </dependency>
+ </dependencies>
+ </plugin>
+
+
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
<configuration>
<instructions>
<Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
- <Bundle-Activator>org.opendaylight.controller.sal.dom.broker.BrokerActivator</Bundle-Activator>
<Private-Package>
- org.opendaylight.controller.sal.dom.broker.*
+ org.opendaylight.controller.sal.dom.broker.*,
+ org.opendaylight.controller.config.yang.md.sal.dom.impl
</Private-Package>
</instructions>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <version>1.8</version>
+ <executions>
+ <execution>
+ <id>add-source</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>add-source</goal>
+ </goals>
+ <configuration>
+ <sources>
+ <source>${project.build.directory}/generated-sources/config</source>
+ </sources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
<plugin>
<groupId>org.eclipse.xtend</groupId>
<artifactId>xtend-maven-plugin</artifactId>
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-dom-broker-impl yang module local name: dom-broker-impl
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:04:41 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+
+import org.opendaylight.controller.sal.core.api.data.DataStore;
+import org.opendaylight.controller.sal.dom.broker.BrokerConfigActivator;
+import org.opendaylight.controller.sal.dom.broker.BrokerImpl;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public final class DomBrokerImplModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractDomBrokerImplModule
+{
+
+ private BundleContext bundleContext;
+
+ public DomBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public DomBrokerImplModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, DomBrokerImplModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ // Add custom validation for module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ BrokerImpl broker = new BrokerImpl();
+ BrokerConfigActivator activator = new BrokerConfigActivator();
+ DataStore store = getDataStoreDependency();
+ activator.start(broker, store,getBundleContext());
+ return broker;
+ }
+
+ private BundleContext getBundleContext() {
+ return this.bundleContext;
+ }
+
+ public void setBundleContext(BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
+ }
+}
--- /dev/null
+/**
+ * Generated file
+
+ * Generated from: yang module name: opendaylight-sal-dom-broker-impl yang module local name: dom-broker-impl
+ * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ * Generated at: Wed Nov 20 17:04:41 CET 2013
+ *
+ * Do not modify this file unless it is present under src/main directory
+ */
+package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public class DomBrokerImplModuleFactory extends
+ org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractDomBrokerImplModuleFactory {
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ DomBrokerImplModule module = (DomBrokerImplModule) super.createModule(instanceName, dependencyResolver, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver,
+ DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
+ DomBrokerImplModule module = (DomBrokerImplModule) super.createModule(instanceName, dependencyResolver, old, bundleContext);
+ module.setBundleContext(bundleContext);
+ return module;
+ }
+
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-dom-broker-impl yang module local name: hash-map-data-store
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:01:31 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+
+import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
+
+/**
+*
+*/
+public final class HashMapDataStoreModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModule
+{
+
+ public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, HashMapDataStoreModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ // Add custom validation for module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ HashMapDataStore store = new HashMapDataStore();
+ return store;
+ }
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-dom-broker-impl yang module local name: hash-map-data-store
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:01:31 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+
+/**
+*
+*/
+public class HashMapDataStoreModuleFactory extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModuleFactory
+{
+
+
+}
--- /dev/null
+/**
+* Generated file
+
+* Generated from: yang module name: opendaylight-sal-dom-broker-impl yang module local name: schema-service-singleton
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Nov 20 17:01:31 CET 2013
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+
+import org.opendaylight.controller.sal.dom.broker.SchemaServiceImpl;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public final class SchemaServiceImplSingletonModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractSchemaServiceImplSingletonModule
+{
+
+ BundleContext bundleContext;
+
+ public SchemaServiceImplSingletonModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public SchemaServiceImplSingletonModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, SchemaServiceImplSingletonModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public boolean canReuseInstance(AbstractSchemaServiceImplSingletonModule oldModule) {
+ return true;
+ }
+
+ public BundleContext getBundleContext() {
+ return bundleContext;
+ }
+
+ public void setBundleContext(BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
+ }
+
+ @Override
+ public void validate(){
+ super.validate();
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ SchemaServiceImpl newInstance = new SchemaServiceImpl();
+ newInstance.setContext(getBundleContext());
+ newInstance.setParser(new YangParserImpl());
+ newInstance.start();
+ return newInstance;
+ }
+}
--- /dev/null
+/**
+ * Generated file
+
+ * Generated from: yang module name: opendaylight-sal-dom-broker-impl yang module local name: schema-service-singleton
+ * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+ * Generated at: Wed Nov 20 17:01:31 CET 2013
+ *
+ * Do not modify this file unless it is present under src/main directory
+ */
+package org.opendaylight.controller.config.yang.md.sal.dom.impl;
+
+import java.util.Collections;
+import java.util.Set;
+
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.DependencyResolverFactory;
+import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.opendaylight.controller.config.spi.Module;
+import org.osgi.framework.BundleContext;
+
+/**
+*
+*/
+public class SchemaServiceImplSingletonModuleFactory extends
+ org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractSchemaServiceImplSingletonModuleFactory {
+
+ private static final ModuleIdentifier IDENTIFIER = new ModuleIdentifier(NAME, "yang-schema-service");
+ public static SchemaServiceImplSingletonModule SINGLETON;
+
+ @Override
+ public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
+ throw new UnsupportedOperationException("Only default instance supported.");
+ }
+
+ @Override
+ public Set<SchemaServiceImplSingletonModule> getDefaultModules(DependencyResolverFactory dependencyResolverFactory,
+ BundleContext bundleContext) {
+ DependencyResolver dependencyResolver = dependencyResolverFactory.createDependencyResolver(IDENTIFIER);
+
+ if (SINGLETON == null) {
+ SINGLETON = new SchemaServiceImplSingletonModule(IDENTIFIER, dependencyResolver);
+ SINGLETON.setBundleContext(bundleContext);
+ }
+ return Collections.singleton(SINGLETON);
+ }
+}
+++ /dev/null
-package org.opendaylight.controller.sal.dom.broker;
-
-import java.util.Hashtable;
-
-import org.opendaylight.controller.sal.core.api.Broker;
-import org.opendaylight.controller.sal.core.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.core.api.data.DataProviderService;
-import org.opendaylight.controller.sal.core.api.model.SchemaService;
-import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
-import org.opendaylight.controller.sal.core.api.mount.MountService;
-import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
-import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-import org.osgi.framework.BundleActivator;
-import org.osgi.framework.BundleContext;
-import org.osgi.framework.ServiceRegistration;
-
-public class BrokerActivator implements BundleActivator {
-
- private static final InstanceIdentifier ROOT = InstanceIdentifier.builder().toInstance();
- BrokerImpl broker;
- private ServiceRegistration<Broker> brokerReg;
- private ServiceRegistration<SchemaService> schemaReg;
- private ServiceRegistration<DataBrokerService> dataReg;
- private ServiceRegistration<DataProviderService> dataProviderReg;
- private SchemaServiceImpl schemaService;
- private DataBrokerImpl dataService;
- private MountPointManagerImpl mountService;
- private ServiceRegistration<MountService> mountReg;
- private ServiceRegistration<MountProvisionService> mountProviderReg;
- private HashMapDataStore hashMapStore;
-
- @Override
- public void start(BundleContext context) throws Exception {
- Hashtable<String, String> emptyProperties = new Hashtable<String, String>();
- broker = new BrokerImpl();
- broker.setBundleContext(context);
-
-
- schemaService = new SchemaServiceImpl();
- schemaService.setContext(context);
- schemaService.setParser(new YangParserImpl());
- schemaService.start();
- schemaReg = context.registerService(SchemaService.class, schemaService, new Hashtable<String, String>());
-
- dataService = new DataBrokerImpl();
- dataService.setExecutor(broker.getExecutor());
-
- dataReg = context.registerService(DataBrokerService.class, dataService, emptyProperties);
- dataProviderReg = context.registerService(DataProviderService.class, dataService, emptyProperties);
-
- hashMapStore = new HashMapDataStore();
-
- dataService.registerConfigurationReader(ROOT, hashMapStore);
- dataService.registerCommitHandler(ROOT, hashMapStore);
- dataService.registerOperationalReader(ROOT, hashMapStore);
-
- mountService = new MountPointManagerImpl();
- mountService.setDataBroker(dataService);
-
- mountReg = context.registerService(MountService.class, mountService, emptyProperties);
- mountProviderReg = context.registerService(MountProvisionService.class, mountService, emptyProperties);
-
- brokerReg = context.registerService(Broker.class, broker, emptyProperties);
- }
-
- @Override
- public void stop(BundleContext context) throws Exception {
- if (brokerReg != null) {
- brokerReg.unregister();
- }
- }
-}
--- /dev/null
+package org.opendaylight.controller.sal.dom.broker
+
+import org.osgi.framework.ServiceRegistration
+import org.opendaylight.controller.sal.core.api.model.SchemaService
+import org.opendaylight.controller.sal.core.api.data.DataBrokerService
+import org.opendaylight.controller.sal.core.api.data.DataProviderService
+import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore
+import org.opendaylight.controller.sal.core.api.mount.MountProvisionService
+import org.opendaylight.controller.sal.core.api.mount.MountService
+import org.osgi.framework.BundleContext
+import java.util.Hashtable
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl
+import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
+import org.opendaylight.controller.sal.core.api.data.DataStore
+
+class BrokerConfigActivator implements AutoCloseable {
+
+
+ private static val ROOT = InstanceIdentifier.builder().toInstance();
+
+ private var ServiceRegistration<SchemaService> schemaReg;
+ private var ServiceRegistration<DataBrokerService> dataReg;
+ private var ServiceRegistration<DataProviderService> dataProviderReg;
+ private var ServiceRegistration<MountService> mountReg;
+ private var ServiceRegistration<MountProvisionService> mountProviderReg;
+
+ private var SchemaServiceImpl schemaService;
+ private var DataBrokerImpl dataService;
+ private var MountPointManagerImpl mountService;
+
+ public def void start(BrokerImpl broker,DataStore store,BundleContext context) {
+ val emptyProperties = new Hashtable<String, String>();
+ broker.setBundleContext(context);
+
+
+ schemaService = new SchemaServiceImpl();
+ schemaService.setContext(context);
+ schemaService.setParser(new YangParserImpl());
+ schemaService.start();
+ schemaReg = context.registerService(SchemaService, schemaService, emptyProperties);
+
+ dataService = new DataBrokerImpl();
+ dataService.setExecutor(broker.getExecutor());
+
+ dataReg = context.registerService(DataBrokerService, dataService, emptyProperties);
+ dataProviderReg = context.registerService(DataProviderService, dataService, emptyProperties);
+
+ dataService.registerConfigurationReader(ROOT, store);
+ dataService.registerCommitHandler(ROOT, store);
+ dataService.registerOperationalReader(ROOT, store);
+
+ mountService = new MountPointManagerImpl();
+ mountService.setDataBroker(dataService);
+
+ mountReg = context.registerService(MountService, mountService, emptyProperties);
+ mountProviderReg = context.registerService(MountProvisionService, mountService, emptyProperties);
+ }
+
+ override def close() {
+ schemaReg?.unregister();
+ dataReg?.unregister();
+ dataProviderReg?.unregister();
+ mountReg?.unregister();
+ mountProviderReg?.unregister();
+ }
+
+}
\ No newline at end of file
import org.opendaylight.yangtools.concepts.ListenerRegistration
import org.opendaylight.controller.sal.core.api.RpcRegistrationListener
-public class BrokerImpl implements Broker {
+public class BrokerImpl implements Broker, AutoCloseable {
private static val log = LoggerFactory.getLogger(BrokerImpl);
// Broker Generic Context
private var ExecutorService executor = Executors.newFixedThreadPool(5);
@Property
private var BundleContext bundleContext;
+
+ @Property
+ private var AutoCloseable deactivator;
@Property
private var RpcRouter router;
sessions.remove(consumerContextImpl);
providerSessions.remove(consumerContextImpl);
}
+
+ override close() throws Exception {
+ deactivator?.close();
+ }
+
}
package org.opendaylight.controller.sal.dom.broker;
+import java.util.concurrent.atomic.AtomicLong;
+
import org.opendaylight.controller.md.sal.common.api.data.DataReader;
import org.opendaylight.controller.md.sal.common.impl.service.AbstractDataBroker;
import org.opendaylight.controller.sal.common.DataStoreIdentifier;
setDataReadRouter(new DataReaderRouter());
}
+ private AtomicLong nextTransaction = new AtomicLong();
+
@Override
public DataTransactionImpl beginTransaction() {
- return new DataTransactionImpl(this);
+ String transactionId = "DOM-" + nextTransaction.getAndIncrement();
+ return new DataTransactionImpl(transactionId,this);
}
@Override
@Deprecated
@Override
public void addValidator(DataStoreIdentifier store, DataValidator validator) {
- // TODO Auto-generated method stub
+ throw new UnsupportedOperationException("Deprecated");
}
@Deprecated
@Override
public void removeValidator(DataStoreIdentifier store, DataValidator validator) {
- // TODO Auto-generated method stub
-
+ throw new UnsupportedOperationException("Deprecated");
}
@Deprecated
@Override
public void addRefresher(DataStoreIdentifier store, DataRefresher refresher) {
- // TODO Auto-generated method stub
-
+ throw new UnsupportedOperationException("Deprecated");
}
@Deprecated
@Override
public void removeRefresher(DataStoreIdentifier store, DataRefresher refresher) {
- // TODO Auto-generated method stub
-
+ throw new UnsupportedOperationException("Deprecated");
}
}
\ No newline at end of file
- public DataTransactionImpl(DataBrokerImpl dataBroker) {
- super(dataBroker);
+ public DataTransactionImpl(Object identifier,DataBrokerImpl dataBroker) {
+ super(identifier,dataBroker);
}
@Override
import java.util.Set;
import java.util.concurrent.Future;
+import org.opendaylight.controller.md.sal.common.api.RegistrationListener;
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandlerRegistration;
import org.opendaylight.controller.md.sal.common.api.data.DataReader;
import org.opendaylight.controller.sal.common.DataStoreIdentifier;
import org.opendaylight.controller.sal.core.api.Broker.RoutedRpcRegistration;
return MountPointImpl.this.readOperationalData(newPath);
}
}
+
+ @Override
+ public ListenerRegistration<RegistrationListener<DataCommitHandlerRegistration<InstanceIdentifier, CompositeNode>>> registerCommitHandlerListener(
+ RegistrationListener<DataCommitHandlerRegistration<InstanceIdentifier, CompositeNode>> commitHandlerListener) {
+ // TODO Auto-generated method stub
+ return null;
+ }
}
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.osgi.util.tracker.BundleTracker;
import org.osgi.util.tracker.BundleTrackerCustomizer;
+import org.osgi.util.tracker.ServiceTracker;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.parser.api.YangModelParser;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.osgi.framework.BundleEvent;
+import org.osgi.framework.ServiceReference;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.concepts.util.ListenerRegistry;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
+
import static com.google.common.base.Preconditions.*;
-public class SchemaServiceImpl implements SchemaService, AutoCloseable {
+public class SchemaServiceImpl implements //
+SchemaService, //
+ServiceTrackerCustomizer<SchemaServiceListener, SchemaServiceListener>, //
+AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(SchemaServiceImpl.class);
private ListenerRegistry<SchemaServiceListener> listeners;
private BundleTracker<Object> bundleTracker;
private final YangStoreCache cache = new YangStoreCache();
+ private ServiceTracker<SchemaServiceListener,SchemaServiceListener> listenerTracker;
+
public ListenerRegistry<SchemaServiceListener> getListeners() {
return listeners;
}
if (listeners == null) {
listeners = new ListenerRegistry<>();
}
-
+
+ listenerTracker = new ServiceTracker<>(context, SchemaServiceListener.class, this);
bundleTracker = new BundleTracker<Object>(context, BundleEvent.RESOLVED | BundleEvent.UNRESOLVED, scanner);
bundleTracker.open();
+ listenerTracker.open();
}
public SchemaContext getGlobalContext() {
private void updateCache(SchemaContext snapshot) {
cache.cacheYangStore(consistentBundlesToYangURLs, snapshot);
+
+ Object[] services = listenerTracker.getServices();
+ if(services != null) {
+ for(Object rawListener : services) {
+ SchemaServiceListener listener = (SchemaServiceListener) rawListener;
+ try {
+ listener.onGlobalContextUpdated(snapshot);
+ } catch (Exception e) {
+ logger.error("Exception occurred while invoking listener",e);
+ }
+ }
+ }
for (ListenerRegistration<SchemaServiceListener> listener : listeners) {
try {
listener.getInstance().onGlobalContextUpdated(snapshot);
proposedNewState.putAll(inconsistentBundlesToYangURLs);
proposedNewState.putAll(bundle, addedURLs);
boolean adding = true;
+
if (tryToUpdateState(addedURLs, proposedNewState, adding) == false) {
inconsistentBundlesToYangURLs.putAll(bundle, addedURLs);
}
this.cachedUrls = setFromMultimapValues(urls);
this.cachedContextSnapshot = ctx;
}
-
+ }
+
+ @Override
+ public SchemaServiceListener addingService(ServiceReference<SchemaServiceListener> reference) {
+
+ SchemaServiceListener listener = context.getService(reference);
+ SchemaContext _ctxContext = getGlobalContext();
+ if(_ctxContext != null) {
+ listener.onGlobalContextUpdated(_ctxContext);
+ }
+ return listener;
+ }
+
+ @Override
+ public void modifiedService(ServiceReference<SchemaServiceListener> reference, SchemaServiceListener service) {
+ // NOOP
+ }
+
+ @Override
+ public void removedService(ServiceReference<SchemaServiceListener> reference, SchemaServiceListener service) {
+ context.ungetService(reference);
}
}
package org.opendaylight.controller.sal.dom.broker.impl
-import org.opendaylight.controller.md.sal.common.api.data.DataReader
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler
import org.opendaylight.controller.md.sal.common.api.data.DataModification
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction
import org.opendaylight.yangtools.yang.common.RpcResult
import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier
import org.opendaylight.yangtools.yang.data.api.CompositeNode
import static extension org.opendaylight.controller.sal.dom.broker.impl.DataUtils.*;
+import org.opendaylight.controller.sal.core.api.data.DataStore
+import java.util.HashSet
-class HashMapDataStore //
-implements //
-DataReader<InstanceIdentifier, CompositeNode>, DataCommitHandler<InstanceIdentifier, CompositeNode> {
+class HashMapDataStore implements DataStore, AutoCloseable {
val Map<InstanceIdentifier, CompositeNode> configuration = new ConcurrentHashMap();
val Map<InstanceIdentifier, CompositeNode> operational = new ConcurrentHashMap();
operational.putAll(modification.updatedOperationalData);
for (removal : modification.removedConfigurationData) {
- configuration.remove(removal);
+ remove(configuration,removal);
}
for (removal : modification.removedOperationalData) {
- operational.remove(removal);
+ remove(operational,removal);
}
return Rpcs.getRpcResult(true, null, Collections.emptySet);
}
+
+    /**
+     * Removes the entry for {@code identifier} and every entry whose path lies
+     * beneath it, so deleting a subtree also drops all of its nested children.
+     */
+    def remove(Map<InstanceIdentifier, CompositeNode> map, InstanceIdentifier identifier) {
+        // Collect matching paths first to avoid mutating the map while iterating its key set.
+        val affected = new HashSet<InstanceIdentifier>();
+        for(path : map.keySet) {
+            // assumes identifier.contains(path) is true for the identifier itself and
+            // for any path nested below it — TODO confirm against the yangtools API
+            if(identifier.contains(path)) {
+                affected.add(path);
+            }
+        }
+        for(pathToRemove : affected) {
+            map.remove(pathToRemove);
+        }
+
+    }
+
+    /** AutoCloseable hook — this in-memory store holds no external resources. */
+    override close() {
+        // NOOP
+    }
+
}
class HashMapDataStoreTransaction implements //
package org.opendaylight.controller.sal.dom.broker.osgi;
+import org.opendaylight.controller.md.sal.common.api.RegistrationListener;
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandlerRegistration;
import org.opendaylight.controller.md.sal.common.api.data.DataReader;
import org.opendaylight.controller.sal.common.DataStoreIdentifier;
import org.opendaylight.controller.sal.core.api.data.DataChangeListener;
public void removeValidator(DataStoreIdentifier store, DataValidator validator) {
getDelegate().removeValidator(store, validator);
}
+
+    /**
+     * Delegates commit-handler-listener registration to the wrapped broker and tracks
+     * the returned registration so it is cleaned up with this wrapper's other registrations.
+     */
+    @Override
+    public ListenerRegistration<RegistrationListener<DataCommitHandlerRegistration<InstanceIdentifier, CompositeNode>>> registerCommitHandlerListener(
+            RegistrationListener<DataCommitHandlerRegistration<InstanceIdentifier, CompositeNode>> commitHandlerListener) {
+        return addRegistration(getDelegate().registerCommitHandlerListener(commitHandlerListener));
+    }
}
--- /dev/null
+module opendaylight-sal-dom-broker-impl {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl";
+ prefix "binding-impl";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-dom {prefix sal;}
+
+  description
+        "Service definition for the DOM (binding-independent) MD-SAL broker implementation.";
+
+ revision "2013-10-28" {
+ description
+ "Initial revision";
+ }
+
+ identity dom-broker-impl {
+ base config:module-type;
+ config:provided-service sal:dom-broker-osgi-registry;
+ config:java-name-prefix DomBrokerImpl;
+ }
+
+ identity hash-map-data-store {
+ base config:module-type;
+ config:provided-service sal:dom-data-store;
+ config:java-name-prefix HashMapDataStore;
+ }
+
+ identity schema-service-singleton {
+ base config:module-type;
+ config:provided-service sal:schema-service;
+ config:java-name-prefix SchemaServiceImplSingleton;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case dom-broker-impl {
+ when "/config:modules/config:module/config:type = 'dom-broker-impl'";
+ container data-store {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity sal:dom-data-store;
+ }
+ }
+ }
+ }
+ }
+
+ augment "/config:modules/config:module/config:state" {
+ case hash-map-data-store {
+ when "/config:modules/config:module/config:type = 'hash-map-data-store'";
+ }
+ }
+
+ augment "/config:modules/config:module/config:state" {
+ case schema-service-singleton {
+ when "/config:modules/config:module/config:type = 'schema-service-singleton'";
+ }
+ }
+
+}
\ No newline at end of file
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.restconf.impl.StructuredData;
-import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
/**
@GET
public Object getRoot();
-
@GET
@Path("/modules")
@Produces({API+JSON,API+XML})
@Produces({Draft02.MediaTypes.API+JSON,Draft02.MediaTypes.API+XML,API+JSON,API+XML})
public StructuredData invokeRpc(@PathParam("identifier") String identifier, CompositeNode payload);
-
@GET
@Path("/config/{identifier:.+}")
@Produces({Draft02.MediaTypes.DATA+JSON,Draft02.MediaTypes.DATA+XML})
public StructuredData readConfigurationData(@PathParam("identifier") String identifier);
-
-
@PUT
@Path("/config/{identifier:.+}")
@Produces({API+JSON,API+XML})
- public RpcResult<TransactionStatus> createConfigurationData(@PathParam("identifier") String identifier, CompositeNode payload);
+ public Response createConfigurationData(@PathParam("identifier") String identifier, CompositeNode payload);
@POST
@Path("/config/{identifier:.+}")
@Produces({API+JSON,API+XML})
- public RpcResult<TransactionStatus> updateConfigurationData(@PathParam("identifier") String identifier, CompositeNode payload);
+ public Response updateConfigurationData(@PathParam("identifier") String identifier, CompositeNode payload);
@GET
@Path("/operational/{identifier:.+}")
@PUT
@Path("/operational/{identifier:.+}")
@Produces({API+JSON,API+XML})
- public RpcResult<TransactionStatus> createOperationalData(@PathParam("identifier") String identifier, CompositeNode payload);
+ public Response createOperationalData(@PathParam("identifier") String identifier, CompositeNode payload);
@POST
@Path("/operational/{identifier:.+}")
@Produces({API+JSON,API+XML})
- public RpcResult<TransactionStatus> updateOperationalData(@PathParam("identifier") String identifier, CompositeNode payload);
+ public Response updateOperationalData(@PathParam("identifier") String identifier, CompositeNode payload);
-
}
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
+import javax.ws.rs.core.Response;
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.restconf.impl.StructuredData;
-import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
public interface RestconfServiceLegacy {
@PUT
@Path("/datastore/{identifier:.+}")
@Produces({API+JSON,API+XML})
- public RpcResult<TransactionStatus> createConfigurationDataLegacy(@PathParam("identifier") String identifier, CompositeNode payload);
+ public Response createConfigurationDataLegacy(@PathParam("identifier") String identifier, CompositeNode payload);
@Deprecated
@POST
@Path("/datastore/{identifier:.+}")
@Produces({API+JSON,API+XML})
- public RpcResult<TransactionStatus> updateConfigurationDataLegacy(@PathParam("identifier") String identifier, CompositeNode payload);
+ public Response updateConfigurationDataLegacy(@PathParam("identifier") String identifier, CompositeNode payload);
}
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.IOException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
import javax.activation.UnsupportedDataTypeException;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.BooleanTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.DecimalTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.EmptyTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.InstanceIdentifierTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.IntegerTypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.type.UnsignedIntegerTypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.*;
+import org.opendaylight.yangtools.yang.model.api.type.*;
import com.google.common.base.Preconditions;
import com.google.gson.stream.JsonWriter;
class JsonMapper {
-
+
private final Set<LeafListSchemaNode> foundLeafLists = new HashSet<>();
private final Set<ListSchemaNode> foundLists = new HashSet<>();
-
+
public void write(JsonWriter writer, CompositeNode data, DataNodeContainer schema) throws IOException {
Preconditions.checkNotNull(writer);
Preconditions.checkNotNull(data);
Preconditions.checkNotNull(schema);
writer.beginObject();
-
+
if (schema instanceof ContainerSchemaNode) {
writeContainer(writer, data, (ContainerSchemaNode) schema);
} else if (schema instanceof ListSchemaNode) {
- writeList(writer, data, (ListSchemaNode) schema);
+ writeList(writer, null, data, (ListSchemaNode) schema);
} else {
throw new UnsupportedDataTypeException(
"Schema can be ContainerSchemaNode or ListSchemaNode. Other types are not supported yet.");
}
-
+
writer.endObject();
-
+
foundLeafLists.clear();
foundLists.clear();
}
- private void writeChildrenOfParent(JsonWriter writer, CompositeNode parent, DataNodeContainer parentSchema) throws IOException {
+ private void writeChildrenOfParent(JsonWriter writer, CompositeNode parent, DataNodeContainer parentSchema)
+ throws IOException {
checkNotNull(parent);
checkNotNull(parentSchema);
-
+
for (Node<?> child : parent.getChildren()) {
DataSchemaNode childSchema = findFirstSchemaForNode(child, parentSchema.getChildNodes());
if (childSchema == null) {
throw new UnsupportedDataTypeException("Probably the data node \"" + child.getNodeType().getLocalName()
+ "\" is not conform to schema");
}
-
+
if (childSchema instanceof ContainerSchemaNode) {
Preconditions.checkState(child instanceof CompositeNode,
"Data representation of Container should be CompositeNode - " + child.getNodeType());
Preconditions.checkState(child instanceof CompositeNode,
"Data representation of List should be CompositeNode - " + child.getNodeType());
foundLists.add((ListSchemaNode) childSchema);
- writeList(writer, (CompositeNode) child, (ListSchemaNode) childSchema);
+ writeList(writer, parent, (CompositeNode) child, (ListSchemaNode) childSchema);
}
} else if (childSchema instanceof LeafListSchemaNode) {
if (!foundLeafLists.contains(childSchema)) {
Preconditions.checkState(child instanceof SimpleNode<?>,
"Data representation of LeafList should be SimpleNode - " + child.getNodeType());
foundLeafLists.add((LeafListSchemaNode) childSchema);
- writeLeafList(writer, (SimpleNode<?>) child, (LeafListSchemaNode) childSchema);
+ writeLeafList(writer, parent, (SimpleNode<?>) child, (LeafListSchemaNode) childSchema);
}
} else if (childSchema instanceof LeafSchemaNode) {
Preconditions.checkState(child instanceof SimpleNode<?>,
+ "LeafListSchemaNode, or LeafSchemaNode. Other types are not supported yet.");
}
}
-
+
for (Node<?> child : parent.getChildren()) {
DataSchemaNode childSchema = findFirstSchemaForNode(child, parentSchema.getChildNodes());
if (childSchema instanceof LeafListSchemaNode) {
}
}
}
-
+
private DataSchemaNode findFirstSchemaForNode(Node<?> node, Set<DataSchemaNode> dataSchemaNode) {
for (DataSchemaNode dsn : dataSchemaNode) {
if (node.getNodeType().getLocalName().equals(dsn.getQName().getLocalName())) {
}
return null;
}
-
+
private void writeContainer(JsonWriter writer, CompositeNode node, ContainerSchemaNode schema) throws IOException {
writer.name(node.getNodeType().getLocalName());
writer.beginObject();
writeChildrenOfParent(writer, node, schema);
writer.endObject();
}
-
- private void writeList(JsonWriter writer, CompositeNode node, ListSchemaNode schema) throws IOException {
- writer.name(node.getNodeType().getLocalName());
- writer.beginArray();
-
- if (node.getParent() != null) {
- CompositeNode parent = node.getParent();
- List<CompositeNode> nodeLists = parent.getCompositesByName(node.getNodeType());
- for (CompositeNode nodeList : nodeLists) {
- writer.beginObject();
- writeChildrenOfParent(writer, nodeList, schema);
- writer.endObject();
- }
- } else {
+
+ private void writeList(JsonWriter writer, CompositeNode nodeParent, CompositeNode node, ListSchemaNode schema) throws IOException {
+ writer.name(node.getNodeType().getLocalName());
+ writer.beginArray();
+
+ if (nodeParent != null) {
+ List<CompositeNode> nodeLists = nodeParent.getCompositesByName(node.getNodeType());
+ for (CompositeNode nodeList : nodeLists) {
writer.beginObject();
- writeChildrenOfParent(writer, node, schema);
+ writeChildrenOfParent(writer, nodeList, schema);
writer.endObject();
}
-
- writer.endArray();
- }
-
- private void writeLeafList(JsonWriter writer, SimpleNode<?> node, LeafListSchemaNode schema) throws IOException {
- writer.name(node.getNodeType().getLocalName());
- writer.beginArray();
-
- CompositeNode parent = node.getParent();
- List<SimpleNode<?>> nodeLeafLists = parent.getSimpleNodesByName(node.getNodeType());
- for (SimpleNode<?> nodeLeafList : nodeLeafLists) {
- writeValueOfNodeByType(writer, nodeLeafList, schema.getType());
- }
-
- writer.endArray();
+ } else {
+ writer.beginObject();
+ writeChildrenOfParent(writer, node, schema);
+ writer.endObject();
+ }
+
+ writer.endArray();
}
-
+
+ private void writeLeafList(JsonWriter writer, CompositeNode nodeParent, SimpleNode<?> node, LeafListSchemaNode schema) throws IOException {
+ writer.name(node.getNodeType().getLocalName());
+ writer.beginArray();
+
+ List<SimpleNode<?>> nodeLeafLists = nodeParent.getSimpleNodesByName(node.getNodeType());
+ for (SimpleNode<?> nodeLeafList : nodeLeafLists) {
+ writeValueOfNodeByType(writer, nodeLeafList, schema.getType());
+ }
+
+ writer.endArray();
+ }
+
private void writeLeaf(JsonWriter writer, SimpleNode<?> node, LeafSchemaNode schema) throws IOException {
writer.name(node.getNodeType().getLocalName());
writeValueOfNodeByType(writer, node, schema.getType());
}
-
- private void writeValueOfNodeByType(JsonWriter writer, SimpleNode<?> node, TypeDefinition<?> type) throws IOException {
+
+ private void writeValueOfNodeByType(JsonWriter writer, SimpleNode<?> node, TypeDefinition<?> type)
+ throws IOException {
if (!(node.getValue() instanceof String)) {
throw new IllegalStateException("Value in SimpleNode should be type String");
}
-
+
String value = (String) node.getValue();
- // TODO check Leafref, InstanceIdentifierTypeDefinition, IdentityrefTypeDefinition, UnionTypeDefinition
- if (type.getBaseType() != null) {
- writeValueOfNodeByType(writer, node, type.getBaseType());
- } else if (type instanceof InstanceIdentifierTypeDefinition) {
- writer.value(((InstanceIdentifierTypeDefinition) type).getPathStatement().toString());
- } else if (type instanceof DecimalTypeDefinition
- || type instanceof IntegerTypeDefinition
- || type instanceof UnsignedIntegerTypeDefinition) {
+ // TODO check Leafref, InstanceIdentifierTypeDefinition,
+ // IdentityrefTypeDefinition, UnionTypeDefinition
+ TypeDefinition<?> baseType = resolveBaseTypeFrom(type);
+ if (baseType instanceof InstanceIdentifierTypeDefinition) {
+ writer.value(((InstanceIdentifierTypeDefinition) baseType).getPathStatement().toString());
+ } else if (baseType instanceof UnionTypeDefinition) {
+ processTypeIsUnionType(writer, (UnionTypeDefinition) baseType, value);
+ } else if (baseType instanceof DecimalTypeDefinition || baseType instanceof IntegerTypeDefinition
+ || baseType instanceof UnsignedIntegerTypeDefinition) {
writer.value(new NumberForJsonWriter(value));
- } else if (type instanceof BooleanTypeDefinition) {
+ } else if (baseType instanceof BooleanTypeDefinition) {
writer.value(Boolean.parseBoolean(value));
- } else if (type instanceof EmptyTypeDefinition) {
- writer.beginArray();
- writer.nullValue();
- writer.endArray();
+ } else if (baseType instanceof EmptyTypeDefinition) {
+ writeEmptyDataTypeToJson(writer);
} else {
writer.value(value != null ? value : "");
}
}
-
+
+    /**
+     * Serializes a value whose schema type is a union: the first matching member type
+     * decides the JSON representation. Branch order is significant — numeric members
+     * are checked before boolean, and anything else falls back to a JSON string.
+     */
+    private void processTypeIsUnionType(JsonWriter writer, UnionTypeDefinition unionType, String value)
+            throws IOException {
+        if (value == null) {
+            // a null union value is rendered the same way as the YANG "empty" type: [null]
+            writeEmptyDataTypeToJson(writer);
+        } else if ((isNumber(value))
+                && containsType(unionType, UnsignedIntegerTypeDefinition.class, IntegerTypeDefinition.class,
+                        DecimalTypeDefinition.class)) {
+            writer.value(new NumberForJsonWriter(value));
+        } else if (isBoolean(value) && containsType(unionType, BooleanTypeDefinition.class)) {
+            writer.value(Boolean.parseBoolean(value));
+        } else {
+            writer.value(value);
+        }
+    }
+
+    /**
+     * Returns true iff {@code value} is exactly "true" or "false" (case-sensitive),
+     * matching the YANG canonical boolean representation.
+     */
+    private boolean isBoolean(String value) {
+        // Direct boolean expression instead of if/return-true/return-false;
+        // constant-first equals is also null-tolerant.
+        return "true".equals(value) || "false".equals(value);
+    }
+
+    /** Writes the JSON form of the YANG "empty" type: an array containing a single null. */
+    private void writeEmptyDataTypeToJson(JsonWriter writer) throws IOException {
+        writer.beginArray();
+        writer.nullValue();
+        writer.endArray();
+    }
+
+    /**
+     * Returns true when {@code value} parses as a Java double.
+     * NOTE(review): Double.valueOf also accepts "NaN", "Infinity" and hex forms such as
+     * "0x1p3", which are not valid JSON numbers — confirm callers never pass these.
+     */
+    private boolean isNumber(String value) {
+        try {
+            Double.valueOf(value);
+        } catch (NumberFormatException e) {
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Returns true when any (recursively resolved) member type of the union is an
+     * instance of one of the requested type-definition classes.
+     */
+    private boolean containsType(UnionTypeDefinition unionType, Class<?>... searchedTypes) {
+        for (TypeDefinition<?> memberType : resolveAllUnionSubtypesFrom(unionType)) {
+            for (Class<?> candidate : searchedTypes) {
+                if (candidate.isInstance(memberType)) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Flattens a (possibly nested) union type into the list of its concrete member
+     * base types: each member is resolved to its root base type, and nested unions
+     * are expanded recursively.
+     */
+    private List<TypeDefinition<?>> resolveAllUnionSubtypesFrom(UnionTypeDefinition inputType) {
+        List<TypeDefinition<?>> result = new ArrayList<>();
+        for (TypeDefinition<?> subtype : inputType.getTypes()) {
+            // resolve directly — the original dead store to the raw subtype is removed
+            TypeDefinition<?> resolvedSubtype = resolveBaseTypeFrom(subtype);
+            if (resolvedSubtype instanceof UnionTypeDefinition) {
+                // a union member may itself be a union — expand it in place
+                result.addAll(resolveAllUnionSubtypesFrom((UnionTypeDefinition) resolvedSubtype));
+            } else {
+                result.add(resolvedSubtype);
+            }
+        }
+        return result;
+    }
+
+    /** Follows the derived-type chain to its root base type; returns {@code type} itself when it has no base. */
+    private TypeDefinition<?> resolveBaseTypeFrom(TypeDefinition<?> type) {
+        return type.getBaseType() != null ? resolveBaseTypeFrom(type.getBaseType()) : type;
+    }
+
private static final class NumberForJsonWriter extends Number {
-
+
private static final long serialVersionUID = -3147729419814417666L;
private final String value;
-
+
public NumberForJsonWriter(String value) {
this.value = value;
}
public String toString() {
return value;
}
-
+
}
}
public CompositeNodeWrapper read(InputStream entityStream) throws UnsupportedFormatException {
JsonParser parser = new JsonParser();
-
+
JsonElement rootElement = parser.parse(new InputStreamReader(entityStream));
if (!rootElement.isJsonObject()) {
throw new UnsupportedFormatException("Root element of Json has to be Object");
}
-
+
Set<Entry<String, JsonElement>> entrySetsOfRootJsonObject = rootElement.getAsJsonObject().entrySet();
if (entrySetsOfRootJsonObject.size() != 1) {
throw new UnsupportedFormatException("Json Object should contain one element");
if (firstElementInArray.isJsonObject()) {
return createStructureWithRoot(firstElementName, firstElementInArray.getAsJsonObject());
}
- throw new UnsupportedFormatException("Array as the first element in Json Object can have only Object element");
+ throw new UnsupportedFormatException(
+ "Array as the first element in Json Object can have only Object element");
}
}
- throw new UnsupportedFormatException("First element in Json Object has to be \"Object\" or \"Array with one Object element\". Other scenarios are not supported yet.");
+ throw new UnsupportedFormatException(
+ "First element in Json Object has to be \"Object\" or \"Array with one Object element\". Other scenarios are not supported yet.");
}
}
-
+
private CompositeNodeWrapper createStructureWithRoot(String rootObjectName, JsonObject rootObject) {
CompositeNodeWrapper firstNode = new CompositeNodeWrapper(getNamespaceFrom(rootObjectName),
getLocalNameFrom(rootObjectName));
}
return firstNode;
}
-
+
private void addChildToParent(String childName, JsonElement childType, CompositeNodeWrapper parent) {
if (childType.isJsonObject()) {
CompositeNodeWrapper child = new CompositeNodeWrapper(getNamespaceFrom(childName),
addChildToParent(childOfChild.getKey(), childOfChild.getValue(), child);
}
} else if (childType.isJsonArray()) {
- for (JsonElement childOfChildType : childType.getAsJsonArray()) {
- addChildToParent(childName, childOfChildType, parent);
+ if (childType.getAsJsonArray().size() == 1 && childType.getAsJsonArray().get(0).isJsonNull()) {
+ parent.addValue(new SimpleNodeWrapper(getNamespaceFrom(childName), getLocalNameFrom(childName), null));
+
+ } else {
+ for (JsonElement childOfChildType : childType.getAsJsonArray()) {
+ addChildToParent(childName, childOfChildType, parent);
+ }
}
} else if (childType.isJsonPrimitive()) {
JsonPrimitive childPrimitive = childType.getAsJsonPrimitive();
String value = childPrimitive.getAsString();
- SimpleNodeWrapper child = null;
- if (value.equals("[null]")) {
- child = new SimpleNodeWrapper(getNamespaceFrom(childName), getLocalNameFrom(childName), null);
- } else {
- child = new SimpleNodeWrapper(getNamespaceFrom(childName), getLocalNameFrom(childName), value);
- }
- parent.addValue(child);
+ parent.addValue(new SimpleNodeWrapper(getNamespaceFrom(childName), getLocalNameFrom(childName), value));
}
}
import org.opendaylight.controller.sal.restconf.impl.SimpleNodeWrapper;
public class XmlReader {
-
+
private final static XMLInputFactory xmlInputFactory = XMLInputFactory.newInstance();
private XMLEventReader eventReader;
public CompositeNodeWrapper read(InputStream entityStream) throws XMLStreamException, UnsupportedFormatException {
eventReader = xmlInputFactory.createXMLEventReader(entityStream);
-
+
if (eventReader.hasNext()) {
XMLEvent element = eventReader.peek();
if (element.isStartDocument()) {
if (eventReader.hasNext() && !isCompositeNodeEvent(eventReader.peek())) {
throw new UnsupportedFormatException("Root element of XML has to be composite element.");
}
-
+
final Stack<NodeWrapper<?>> processingQueue = new Stack<>();
CompositeNodeWrapper root = null;
NodeWrapper<?> element = null;
element = processingQueue.pop();
}
}
-
+
if (!root.getLocalName().equals(element.getLocalName())) {
throw new UnsupportedFormatException("XML should contain only one root element");
}
-
+
return root;
}
-
+
private boolean isSimpleNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
}
return false;
}
-
+
private boolean isCompositeNodeEvent(final XMLEvent event) throws XMLStreamException {
checkArgument(event != null, "XML Event cannot be NULL!");
if (event.isStartElement()) {
}
return false;
}
-
- private SimpleNodeWrapper resolveSimpleNodeFromStartElement(final StartElement startElement) throws XMLStreamException {
+
+ private SimpleNodeWrapper resolveSimpleNodeFromStartElement(final StartElement startElement)
+ throws XMLStreamException {
checkArgument(startElement != null, "Start Element cannot be NULL!");
String data = null;
data = innerEvent.asCharacters().getData();
}
} else if (innerEvent.isEndElement()) {
- data = "";
+ if (startElement.getLocation().getCharacterOffset() == innerEvent.getLocation().getCharacterOffset()) {
+ data = null;
+ } else {
+ data = "";
+ }
}
}
-
+
return new SimpleNodeWrapper(getNamespaceFrom(startElement), getLocalNameFrom(startElement), data);
}
-
+
private CompositeNodeWrapper resolveCompositeNodeFromStartElement(final StartElement startElement) {
checkArgument(startElement != null, "Start Element cannot be NULL!");
return new CompositeNodeWrapper(getNamespaceFrom(startElement), getLocalNameFrom(startElement));
}
-
+
private String getLocalNameFrom(StartElement startElement) {
return startElement.getName().getLocalPart();
}
-
+
private URI getNamespaceFrom(StartElement startElement) {
String namespaceURI = startElement.getName().getNamespaceURI();
return namespaceURI.isEmpty() ? null : URI.create(namespaceURI);
}
-
+
}
private DataBrokerService dataService;
private new() {
- if (INSTANCE != null) {
+ if (INSTANCE !== null) {
throw new IllegalStateException("Already instantiated");
}
}
}
private def void checkPreconditions() {
- if (context == null || dataService == null) {
+ if (context === null || dataService === null) {
throw new WebApplicationException(Response.status(Response.Status.SERVICE_UNAVAILABLE)
.entity(RestconfProvider::NOT_INITALIZED_MSG).build())
}
private new() {
- if (INSTANCE != null) {
+ if (INSTANCE !== null) {
throw new IllegalStateException("Already instantiated");
}
}
}
private def void checkPreconditions() {
- if (schemas == null) {
+ if (schemas === null) {
throw new WebApplicationException(Response.status(Response.Status.SERVICE_UNAVAILABLE)
.entity(RestconfProvider::NOT_INITALIZED_MSG).build())
}
pathArgs.remove(0)
}
val schemaNode = ret.collectPathArguments(pathArgs, restconfInstance.findModule);
- if (schemaNode == null) {
+ if (schemaNode === null) {
return null
}
new InstanceIdWithSchemaNode(ret.toInstance, schemaNode)
private def getLatestModule(SchemaContext schema, String moduleName) {
checkNotNull(schema)
- checkArgument(moduleName != null && !moduleName.empty)
+ checkArgument(moduleName !== null && !moduleName.empty)
val modules = schema.modules.filter[m|m.name == moduleName]
var latestModule = modules.head
for (module : modules) {
def CharSequence toRestconfIdentifier(QName qname) {
checkPreconditions
var module = uriToModuleName.get(qname.namespace)
- if (module == null) {
+ if (module === null) {
val moduleSchema = schemas.findModuleByNamespaceAndRevision(qname.namespace, qname.revision);
- if(moduleSchema == null) throw new IllegalArgumentException()
+ if(moduleSchema === null) throw new IllegalArgumentException()
uriToModuleName.put(qname.namespace, moduleSchema.name)
module = moduleSchema.name;
}
}
private def toUriString(Object object) {
- if(object == null) return "";
+ if(object === null) return "";
return URLEncoder.encode(object.toString)
}
val nodeName = nodeRef.toNodeName();
val targetNode = parentNode.getDataChildByName(nodeName);
- if (targetNode == null) {
+ if (targetNode === null) {
val children = parentNode.childNodes
for (child : children) {
if (child instanceof ChoiceNode) {
val choice = child as ChoiceNode
for (caze : choice.cases) {
val result = builder.collectPathArguments(strings, caze as DataNodeContainer);
- if (result != null)
+ if (result !== null)
return result
}
}
package org.opendaylight.controller.sal.restconf.impl
import java.util.List
+import javax.ws.rs.core.Response
import org.opendaylight.controller.sal.rest.api.RestconfService
import org.opendaylight.yangtools.yang.data.api.CompositeNode
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus
+import javax.ws.rs.WebApplicationException
class RestconfImpl implements RestconfService {
extension ControllerContext controllerContext
private new() {
- if (INSTANCE != null) {
+ if (INSTANCE !== null) {
throw new IllegalStateException("Already instantiated");
}
}
}
override readData(String identifier) {
- val instanceIdentifierWithSchemaNode = identifier.toInstanceIdentifier
+ val instanceIdentifierWithSchemaNode = identifier.resolveInstanceIdentifier
val data = broker.readOperationalData(instanceIdentifierWithSchemaNode.getInstanceIdentifier);
return new StructuredData(data, instanceIdentifierWithSchemaNode.schemaNode)
}
override createConfigurationData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
- return broker.commitConfigurationDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ val status = broker.commitConfigurationDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ switch status.result {
+ case TransactionStatus.COMMITED: Response.status(Response.Status.OK).build
+ default: Response.status(Response.Status.INTERNAL_SERVER_ERROR).build
+ }
}
override updateConfigurationData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
- return broker.commitConfigurationDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ val status = broker.commitConfigurationDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ switch status.result {
+ case TransactionStatus.COMMITED: Response.status(Response.Status.NO_CONTENT).build
+ default: Response.status(Response.Status.INTERNAL_SERVER_ERROR).build
+ }
}
override invokeRpc(String identifier, CompositeNode payload) {
}
override readConfigurationData(String identifier) {
- val instanceIdentifierWithSchemaNode = identifier.toInstanceIdentifier
+ val instanceIdentifierWithSchemaNode = identifier.resolveInstanceIdentifier
val data = broker.readOperationalData(instanceIdentifierWithSchemaNode.getInstanceIdentifier);
return new StructuredData(data, instanceIdentifierWithSchemaNode.schemaNode)
}
override readOperationalData(String identifier) {
- val instanceIdentifierWithSchemaNode = identifier.toInstanceIdentifier
+ val instanceIdentifierWithSchemaNode = identifier.resolveInstanceIdentifier
val data = broker.readOperationalData(instanceIdentifierWithSchemaNode.getInstanceIdentifier);
return new StructuredData(data, instanceIdentifierWithSchemaNode.schemaNode)
}
}
override createOperationalData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
- return broker.commitOperationalDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ val status = broker.commitOperationalDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ switch status.result {
+ case TransactionStatus.COMMITED: Response.status(Response.Status.OK).build
+ default: Response.status(Response.Status.INTERNAL_SERVER_ERROR).build
+ }
}
override updateOperationalData(String identifier, CompositeNode payload) {
- val identifierWithSchemaNode = identifier.toInstanceIdentifier
+ val identifierWithSchemaNode = identifier.resolveInstanceIdentifier
val value = resolveNodeNamespaceBySchema(payload, identifierWithSchemaNode.schemaNode)
- return broker.commitOperationalDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ val status = broker.commitOperationalDataPut(identifierWithSchemaNode.instanceIdentifier,value).get();
+ switch status.result {
+ case TransactionStatus.COMMITED: Response.status(Response.Status.NO_CONTENT).build
+ default: Response.status(Response.Status.INTERNAL_SERVER_ERROR).build
+ }
+ }
+
+    /**
+     * Parses a RESTCONF URI path into an instance identifier plus its schema node,
+     * translating a parse failure into an HTTP 400 (Bad Request) response instead of
+     * letting a null propagate to the broker calls.
+     */
+    private def InstanceIdWithSchemaNode resolveInstanceIdentifier(String identifier) {
+        val identifierWithSchemaNode = identifier.toInstanceIdentifier
+        if (identifierWithSchemaNode === null) {
+            throw new WebApplicationException(Response.status(Response.Status.BAD_REQUEST).entity("URI has bad format")
+                .build());
+        }
+        return identifierWithSchemaNode
+    }
private def CompositeNode resolveNodeNamespaceBySchema(CompositeNode node, DataSchemaNode schema) {
}
private def void addNamespaceToNodeFromSchemaRecursively(NodeWrapper<?> nodeBuilder, DataSchemaNode schema) {
- if (nodeBuilder.namespace == null) {
+ if (nodeBuilder.namespace === null) {
nodeBuilder.namespace = schema.QName.namespace
}
if (nodeBuilder instanceof CompositeNodeWrapper) {
import org.opendaylight.yangtools.yang.common.RpcResult;
public class DummyFuture implements Future<RpcResult<TransactionStatus>> {
+
+ private final boolean cancel;
+ private final boolean isCancelled;
+ private final boolean isDone;
+ private final RpcResult<TransactionStatus> result;
+
+ public DummyFuture() {
+ cancel = false;
+ isCancelled = false;
+ isDone = false;
+ result = null;
+ }
+
+ private DummyFuture(Builder builder) {
+ cancel = builder.cancel;
+ isCancelled = builder.isCancelled;
+ isDone = builder.isDone;
+ result = builder.result;
+ }
+
+ public static Builder builder() {
+ return new DummyFuture.Builder();
+ }
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
- return false;
+ return cancel;
}
@Override
public boolean isCancelled() {
- return false;
+ return isCancelled;
}
@Override
public boolean isDone() {
- return false;
+ return isDone;
}
@Override
public RpcResult<TransactionStatus> get() throws InterruptedException, ExecutionException {
- return null;
+ return result;
}
@Override
public RpcResult<TransactionStatus> get(long timeout, TimeUnit unit) throws InterruptedException,
ExecutionException, TimeoutException {
- return null;
+ return result;
+ }
+
+ public static class Builder {
+
+ private boolean cancel;
+ private boolean isCancelled;
+ private boolean isDone;
+ private RpcResult<TransactionStatus> result;
+
+ public Builder cancel(boolean cancel) {
+ this.cancel = cancel;
+ return this;
+ }
+
+ public Builder isCancelled(boolean isCancelled) {
+ this.isCancelled = isCancelled;
+ return this;
+ }
+
+ public Builder isDone(boolean isDone) {
+ this.isDone = isDone;
+ return this;
+ }
+
+ public Builder rpcResult(RpcResult<TransactionStatus> result) {
+ this.result = result;
+ return this;
+ }
+
+ public Future<RpcResult<TransactionStatus>> build() {
+ return new DummyFuture(this);
+ }
}
}
\ No newline at end of file
--- /dev/null
+package org.opendaylight.controller.sal.restconf.impl.test;
+
+import java.util.Collection;
+
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+public class DummyRpcResult implements RpcResult<TransactionStatus> {
+
+ private final boolean isSuccessful;
+ private final TransactionStatus result;
+ private final Collection<RpcError> errors;
+
+ public DummyRpcResult() {
+ isSuccessful = false;
+ result = null;
+ errors = null;
+ }
+
+ private DummyRpcResult(Builder builder) {
+ isSuccessful = builder.isSuccessful;
+ result = builder.result;
+ errors = builder.errors;
+ }
+
+ public static Builder builder() {
+ return new DummyRpcResult.Builder();
+ }
+
+ @Override
+ public boolean isSuccessful() {
+ return isSuccessful;
+ }
+
+ @Override
+ public TransactionStatus getResult() {
+ return result;
+ }
+
+ @Override
+ public Collection<RpcError> getErrors() {
+ return errors;
+ }
+
+ public static class Builder {
+ private boolean isSuccessful;
+ private TransactionStatus result;
+ private Collection<RpcError> errors;
+
+ public Builder isSuccessful(boolean isSuccessful) {
+ this.isSuccessful = isSuccessful;
+ return this;
+ }
+
+ public Builder result(TransactionStatus result) {
+ this.result = result;
+ return this;
+ }
+
+ public Builder errors(Collection<RpcError> errors) {
+ this.errors = errors;
+ return this;
+ }
+
+ public RpcResult<TransactionStatus> build() {
+ return new DummyRpcResult(this);
+ }
+
+ }
+
+}
import com.google.gson.JsonSyntaxException;
-public class FromJsonToCompositeNode {
+public class FromJsonToCompositeNodeTest {
- private static Logger LOG = LoggerFactory.getLogger(FromJsonToCompositeNode.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FromJsonToCompositeNodeTest.class);
@Test
public void simpleListTest() {
simpleTest("/json-to-composite-node/simple-list.json", "/json-to-composite-node/simple-list-yang", "lst",
- "simple:data:types");
+ "simple:list:yang1", "simple-list-yang1");
}
@Test
public void simpleContainerTest() {
simpleTest("/json-to-composite-node/simple-container.json", "/json-to-composite-node/simple-container-yang",
- "cont", "simple:data:types");
+ "cont", "simple:container:yang", "simple-container-yang");
+ }
+
+    /**
+     * Tests that a separate simple node instance is created for every leaf-list item.
+     */
+ @Test
+ public void multipleItemsInLeafList() {
+ CompositeNode compositeNode = compositeContainerFromJson(
+ "/json-to-composite-node/multiple-leaflist-items.json", true);
+ assertNotNull(compositeNode);
+ assertEquals(3, compositeNode.getChildren().size());
+
+ boolean lflst1_1 = false;
+ boolean lflst1_2 = false;
+ boolean lflst1_3 = false;
+
+ for (Node<?> node : compositeNode.getChildren()) {
+ assertEquals("lflst1", node.getNodeType().getLocalName());
+ assertTrue(node instanceof SimpleNode<?>);
+ SimpleNode<?> simpleNode = (SimpleNode<?>) node;
+ if (simpleNode.getValue().equals("45")) {
+ lflst1_1 = true;
+ } else if (simpleNode.getValue().equals("55")) {
+ lflst1_2 = true;
+ } else if (simpleNode.getValue().equals("66")) {
+ lflst1_3 = true;
+ }
+ }
+
+ assertTrue(lflst1_1);
+ assertTrue(lflst1_2);
+ assertTrue(lflst1_3);
+
}
/**
verityMultipleItemsInList(compositeNode);
}
+ @Test
+ public void nullArrayToSimpleNodeWithNullValueTest() {
+ CompositeNode compositeNode = compositeContainerFromJson("/json-to-composite-node/array-with-null.json", true);
+ assertNotNull(compositeNode);
+ assertEquals("cont", compositeNode.getNodeType().getLocalName());
+
+ assertNotNull(compositeNode.getChildren());
+ assertEquals(1, compositeNode.getChildren().size());
+ Node<?> lfNode = compositeNode.getChildren().iterator().next();
+
+ assertTrue(lfNode instanceof SimpleNode<?>);
+ assertEquals(null, ((SimpleNode<?>) lfNode).getValue());
+
+ }
+
@Test
public void incorrectTopLevelElementsTest() {
Throwable cause1 = null;
}
- private void simpleTest(String jsonPath, String yangPath, String topLevelElementName, String namespace) {
+    /**
+     * Tests that namespaces <b>stay unchanged</b> if concrete values are
+     * already present in a composite or simple node and the update method is
+     * called.
+     *
+     */
+ @Test
+ public void notSupplyNamespaceIfAlreadySupplied() {
+
+ CompositeNode compositeNode = compositeContainerFromJson("/json-to-composite-node/simple-list.json");
+ assertNotNull(compositeNode);
+
+ DataSchemaNode dataSchemaNode1 = null;
+ DataSchemaNode dataSchemaNode2 = null;
+ try {
+ dataSchemaNode1 = TestUtils.obtainSchemaFromYang("/json-to-composite-node/simple-list-yang",
+ "simple-list-yang1");
+ dataSchemaNode2 = TestUtils.obtainSchemaFromYang("/json-to-composite-node/simple-list-yang",
+ "simple-list-yang2");
+ } catch (FileNotFoundException e) {
+ LOG.error(e.getMessage());
+ assertTrue(false);
+ }
+ assertNotNull(dataSchemaNode1);
+ assertNotNull(dataSchemaNode2);
+
+        // supplement namespaces according to first data schema -
+        // "simple:list:yang1"
+ TestUtils.supplementNamespace(dataSchemaNode1, compositeNode);
+
+ assertTrue(compositeNode instanceof CompositeNodeWrapper);
+ CompositeNode compNode = ((CompositeNodeWrapper) compositeNode).unwrap(null);
+
+ assertEquals("lst", compNode.getNodeType().getLocalName());
+ verifyCompositeNode(compNode, "simple:list:yang1");
+
+        // dataSchemaNode2 shouldn't be taken into account, because compNode
+        // isn't a CompositeNodeWrapper
+ TestUtils.supplementNamespace(dataSchemaNode2, compNode);
+ verifyCompositeNode(compNode, "simple:list:yang1");
+
+ }
+
+ private void simpleTest(String jsonPath, String yangPath, String topLevelElementName, String namespace,
+ String moduleName) {
CompositeNode compositeNode = compositeContainerFromJson(jsonPath);
assertNotNull(compositeNode);
DataSchemaNode dataSchemaNode = null;
try {
- dataSchemaNode = TestUtils.obtainSchemaFromYang(yangPath);
+ dataSchemaNode = TestUtils.obtainSchemaFromYang(yangPath, moduleName);
} catch (FileNotFoundException e) {
LOG.error(e.getMessage());
assertTrue(false);
throws WebApplicationException {
JsonToCompositeNodeProvider jsonToCompositeNodeProvider = JsonToCompositeNodeProvider.INSTANCE;
- InputStream jsonStream = FromJsonToCompositeNode.class.getResourceAsStream(jsonPath);
+ InputStream jsonStream = FromJsonToCompositeNodeTest.class.getResourceAsStream(jsonPath);
try {
CompositeNode compositeNode = jsonToCompositeNodeProvider
.readFrom(null, null, null, null, null, jsonStream);
import org.junit.*;
import org.opendaylight.controller.sal.rest.impl.XmlToCompositeNodeProvider;
-import org.opendaylight.controller.sal.restconf.impl.CompositeNodeWrapper;
+import org.opendaylight.controller.sal.restconf.impl.*;
import org.opendaylight.yangtools.yang.data.api.*;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
import org.slf4j.*;
-public class FromXmlToCompositeNode {
- private static Logger LOG = LoggerFactory.getLogger(FromXmlToCompositeNode.class);
+public class FromXmlToCompositeNodeTest {
+ private static final Logger LOG = LoggerFactory.getLogger(FromXmlToCompositeNodeTest.class);
/**
* top level element represents container. second level element is list with
String nameSpace = "data:container:yang";
assertEquals(nameSpace, compNode.getNodeType().getNamespace().toString());
+ verifyNullAndEmptyStringSingleNode(compNode, nameSpace);
verifyCommonPartAOfXml(compNode, "", nameSpace);
}
+ private void verifyNullAndEmptyStringSingleNode(CompositeNode compNode, String nameSpace) {
+ assertEquals("cont", compNode.getNodeType().getLocalName());
+
+ SimpleNode<?> lf2 = null;
+ SimpleNode<?> lf3 = null;
+ int found = 0;
+ for (Node<?> child : compNode.getChildren()) {
+ if (found == 0x3)
+ break;
+ if (child instanceof SimpleNode<?>) {
+ SimpleNode<?> childSimple = (SimpleNode<?>) child;
+ if (childSimple.getNodeType().getLocalName().equals("lf3")) {
+ lf3 = childSimple;
+ found = found | (1 << 0);
+ } else if (childSimple.getNodeType().getLocalName().equals("lf2")) {
+ lf2 = childSimple;
+ found = found | (1 << 1);
+ }
+ }
+ assertEquals(nameSpace, child.getNodeType().getNamespace().toString());
+ }
+
+ assertEquals("", lf2.getValue());
+ assertEquals(null, lf3.getValue());
+ }
+
@Test
public void testXmlDataList() {
CompositeNode compNode = compositeContainerFromXml("/xml-to-composite-node/data-list.xml", false);
private CompositeNode compositeContainerFromXml(String xmlPath, boolean dummyNamespaces) {
XmlToCompositeNodeProvider xmlToCompositeNodeProvider = XmlToCompositeNodeProvider.INSTANCE;
try {
- InputStream xmlStream = FromXmlToCompositeNode.class.getResourceAsStream(xmlPath);
+ InputStream xmlStream = FromXmlToCompositeNodeTest.class.getResourceAsStream(xmlPath);
CompositeNode compositeNode = xmlToCompositeNodeProvider.readFrom(null, null, null, null, null, xmlStream);
if (dummyNamespaces) {
try {
import java.net.*;
import java.sql.Date;
import java.util.*;
+import java.util.concurrent.Future;
import javax.ws.rs.WebApplicationException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
import javax.xml.stream.XMLStreamException;
import javax.xml.transform.*;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.sal.rest.impl.StructuredDataToJsonProvider;
import org.opendaylight.controller.sal.restconf.impl.*;
import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.*;
import org.opendaylight.yangtools.yang.data.impl.XmlTreeBuilder;
import org.opendaylight.yangtools.yang.model.api.*;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
import org.slf4j.*;
import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+import com.google.common.base.Preconditions;
final class TestUtils {
}
return (CompositeNode) dataTree;
}
+
+ public static Document loadDocumentFrom(InputStream inputStream) {
+ try {
+ DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance();
+ DocumentBuilder docBuilder = dbfac.newDocumentBuilder();
+ return docBuilder.parse(inputStream);
+ } catch (SAXException | IOException | ParserConfigurationException e) {
+ logger.error("Error during loading Document from XML", e);
+ return null;
+ }
+ }
public static String getDocumentInPrintableForm(Document doc) {
+ Preconditions.checkNotNull(doc);
try {
ByteArrayOutputStream out = new ByteArrayOutputStream();
TransformerFactory tf = TransformerFactory.newInstance();
ControllerContext controllerContext = mock(ControllerContext.class);
BrokerFacade broker = mock(BrokerFacade.class);
+ RpcResult<TransactionStatus> rpcResult = DummyRpcResult.builder().result(TransactionStatus.COMMITED).build();
+ Future<RpcResult<TransactionStatus>> future = DummyFuture.builder().rpcResult(rpcResult).build();
when(controllerContext.toInstanceIdentifier(any(String.class))).thenReturn(instIdAndSchema);
- when(broker.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(
- new DummyFuture());
+ when(broker.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(future);
restconf.setControllerContext(controllerContext);
restconf.setBroker(broker);
if (modules.size() < 1) {
return null;
}
-
- Module moduleRes = null;
+
+ Module moduleRes = null;
if (modules.size() > 1) {
if (moduleName == null) {
return null;
} else {
- for (Module module: modules) {
+ for (Module module : modules) {
if (module.getName().equals(moduleName)) {
- moduleRes = module;
+ moduleRes = module;
}
}
if (moduleRes == null) {
} else {
moduleRes = modules.iterator().next();
}
-
+
if (moduleRes.getChildNodes() == null) {
return null;
}
@Test
public void simpleYangDataTest() {
String jsonOutput;
- // jsonOutput =
- // TestUtils.readJsonFromFile("/yang-to-json-conversion/simple-yang-types/xml/awaited_output.json",
- // false);
-
jsonOutput = TestUtils.convertCompositeNodeDataAndYangToJson(
TestUtils.loadCompositeNode("/yang-to-json-conversion/simple-data-types/xml/data.xml"),
"/yang-to-json-conversion/simple-data-types", "/yang-to-json-conversion/simple-data-types/xml");
// boolean lfref1Checked = false;
boolean lfemptyChecked = false;
boolean lfstr1Checked = false;
-
+
while (jReader.hasNext()) {
String keyName = jReader.nextName();
JsonToken peek = null;
jReader.nextNull();
jReader.endArray();
lfemptyChecked = true;
- // TODO: test will be implemented when functionality will be
- // implemented
- } else if (keyName.equals("lflstunion")) {
- jReader.skipValue();
+ } else if (keyName.startsWith("lfunion")) {
+ checkLfUnion(jReader, keyName, peek);
} else {
assertTrue("Key " + keyName + " doesn't exists in yang file.", false);
}
jReader.endObject();
}
+
+ private void checkLfUnion(JsonReader jReader, String keyName, JsonToken peek) throws IOException {
+ if (keyName.equals("lfunion1")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.NUMBER, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion2")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.NUMBER, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion3")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextInt();
+ } else if (keyName.equals("lfunion4")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.BOOLEAN, peek);
+ jReader.nextBoolean();
+ } else if (keyName.equals("lfunion5")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion6")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion7")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion8")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion9")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion10")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.STRING, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion11")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.NUMBER, peek);
+ jReader.nextString();
+ } else if (keyName.equals("lfunion12")) {
+ assertEquals("Key " + keyName + " has incorrect type", JsonToken.BOOLEAN, peek);
+ jReader.nextBoolean();
+ }
+ }
}
package org.opendaylight.controller.sal.restconf.impl.test;
-import static org.mockito.Mockito.*;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
import java.io.FileNotFoundException;
-import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
-import java.util.Collection;
import java.util.List;
import java.util.Set;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import javax.ws.rs.core.Application;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import org.glassfish.jersey.client.ClientConfig;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.test.JerseyTest;
import org.glassfish.jersey.test.TestProperties;
import org.opendaylight.controller.sal.restconf.impl.ControllerContext;
import org.opendaylight.controller.sal.restconf.impl.MediaTypes;
import org.opendaylight.controller.sal.restconf.impl.RestconfImpl;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.data.api.InstanceIdentifier;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.w3c.dom.Document;
-import org.xml.sax.SAXException;
import com.google.common.base.Charsets;
private static ControllerContext controllerContext;
private static BrokerFacade brokerFacade;
private static RestconfImpl restconfImpl;
+ private static final MediaType MEDIA_TYPE = new MediaType("application", "vnd.yang.api+xml");
@BeforeClass
- public static void init() {
- Set<Module> allModules = null;
- try {
- allModules = TestUtils.loadModules(RestconfImplTest.class.getResource("/full-versions/yangs").getPath());
- } catch (FileNotFoundException e) {
- e.printStackTrace();
- }
+ public static void init() throws FileNotFoundException {
+ Set<Module> allModules = TestUtils.loadModules(RestconfImplTest.class.getResource("/full-versions/yangs").getPath());
SchemaContext schemaContext = TestUtils.loadSchemaContext(allModules);
controllerContext = ControllerContext.getInstance();
controllerContext.setSchemas(schemaContext);
}
@Test
- public void testStructuredDataToXmlProvider() throws FileNotFoundException {
- URI uri = null;
- try {
- uri = new URI("/datastore/" + URLEncoder.encode("ietf-interfaces:interfaces/interface/eth0", Charsets.US_ASCII.name()).toString());
- } catch (UnsupportedEncodingException | URISyntaxException e) {
- e.printStackTrace();
- }
+ public void testStructuredDataToXmlProvider() throws FileNotFoundException, UnsupportedEncodingException {
+ String uri = createUri("/datastore/", "ietf-interfaces:interfaces/interface/eth0");
InputStream xmlStream = RestconfImplTest.class.getResourceAsStream("/parts/ietf-interfaces_interfaces.xml");
CompositeNode loadedCompositeNode = TestUtils.loadCompositeNode(xmlStream);
when(brokerFacade.readOperationalData(any(InstanceIdentifier.class))).thenReturn(loadedCompositeNode);
- Response response = target(uri.toASCIIString()).request(MediaTypes.API+RestconfService.XML).get();
+ Response response = target(uri).request(MEDIA_TYPE).get();
assertEquals(200, response.getStatus());
}
@Test
- public void testXmlToCompositeNodeProvider() throws ParserConfigurationException, SAXException, IOException {
- URI uri = null;
- try {
- uri = new URI("/config/" + URLEncoder.encode("ietf-interfaces:interfaces/interface/eth0", Charsets.US_ASCII.name()).toString());
- } catch (UnsupportedEncodingException | URISyntaxException e) {
- e.printStackTrace();
- }
- InputStream xmlStream = RestconfImplTest.class.getResourceAsStream("/parts/ietf-interfaces_interfaces.xml");
- final CompositeNode loadedCompositeNode = TestUtils.loadCompositeNode(xmlStream);
- when(brokerFacade.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(new Future<RpcResult<TransactionStatus>>() {
- @Override
- public boolean cancel(boolean mayInterruptIfRunning) {
- return false;
- }
- @Override
- public boolean isCancelled() {
- return false;
- }
- @Override
- public boolean isDone() {
- return false;
- }
- @Override
- public RpcResult<TransactionStatus> get() throws InterruptedException, ExecutionException {
- return null;
- }
- @Override
- public RpcResult<TransactionStatus> get(long timeout, TimeUnit unit) throws InterruptedException,
- ExecutionException, TimeoutException {
- return null;
- }
- });
+ public void testBadFormatXmlToCompositeNodeProvider() throws UnsupportedEncodingException, URISyntaxException {
+ String uri = createUri("/operations/", "ietf-interfaces:interfaces/interface/eth0");
- DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance();
- DocumentBuilder docBuilder = dbfac.newDocumentBuilder();
- xmlStream = RestconfImplTest.class.getResourceAsStream("/parts/ietf-interfaces_interfaces.xml");
- Document doc = docBuilder.parse(xmlStream);
+ Response response = target(uri).request(MediaTypes.API + RestconfService.XML).post(
+ Entity.entity("<SimpleNode/>", MEDIA_TYPE));
+ assertEquals(400, response.getStatus());
- Response response = target(uri.toASCIIString()).request(MediaTypes.API+RestconfService.XML).post(Entity.entity(TestUtils.getDocumentInPrintableForm(doc), new MediaType("application","vnd.yang.api+xml")));
- assertEquals(204, response.getStatus());
+ response = target(uri).request(MediaTypes.API + RestconfService.XML).post(
+ Entity.entity("<SimpleNode>", MEDIA_TYPE));
+ assertEquals(400, response.getStatus());
}
@Test
- public void testXmlToCompositeNodeProviderExceptions() {
- URI uri = null;
- try {
- uri = new URI("/operations/" + URLEncoder.encode("ietf-interfaces:interfaces/interface/eth0", Charsets.US_ASCII.name()).toString());
- } catch (UnsupportedEncodingException | URISyntaxException e) {
- e.printStackTrace();
- }
+ public void testXmlToCompositeNode404NotFound() throws UnsupportedEncodingException, URISyntaxException {
+ String uri = createUri("/datastore/", "ietf-interfaces:interfaces/interface/eth0");
- Response response = target(uri.toASCIIString()).request(MediaTypes.API + RestconfService.XML).post(
- Entity.entity("<SimpleNode/>", new MediaType("application", "vnd.yang.api+xml")));
- assertEquals(400, response.getStatus());
+ when(brokerFacade.readOperationalData(any(InstanceIdentifier.class))).thenReturn(null);
- response = target(uri.toASCIIString()).request(MediaTypes.API + RestconfService.XML).post(
- Entity.entity("<SimpleNode>", new MediaType("application", "vnd.yang.api+xml")));
- assertEquals(400, response.getStatus());
+ Response response = target(uri).request(MediaTypes.API+RestconfService.XML).get();
+ assertEquals(404, response.getStatus());
}
@Test
- public void testXmlToCompositeNode404NotFound() {
- URI uri = null;
- try {
- uri = new URI("/datastore/" + URLEncoder.encode("ietf-interfaces:interfaces/interface/eth0", Charsets.US_ASCII.name()).toString());
- } catch (UnsupportedEncodingException | URISyntaxException e) {
- e.printStackTrace();
- }
+ public void testXmlToCompositeNode400() throws UnsupportedEncodingException, URISyntaxException {
+ String uri = createUri("/datastore/", "simple-nodes:user/name");
when(brokerFacade.readOperationalData(any(InstanceIdentifier.class))).thenReturn(null);
- Response response = target(uri.toASCIIString()).request(MediaTypes.API+RestconfService.XML).get();
- assertEquals(404, response.getStatus());
+ Response response = target(uri).request(MediaTypes.API+RestconfService.XML).get();
+ assertEquals(400, response.getStatus());
+ }
+
+ @Test
+ public void testRpcResultCommitedToStatusCodes() throws UnsupportedEncodingException {
+ InputStream xmlStream = RestconfImplTest.class.getResourceAsStream("/parts/ietf-interfaces_interfaces.xml");
+ String xml = TestUtils.getDocumentInPrintableForm(TestUtils.loadDocumentFrom(xmlStream));
+ Entity<String> entity = Entity.entity(xml, MEDIA_TYPE);
+ RpcResult<TransactionStatus> rpcResult = DummyRpcResult.builder().result(TransactionStatus.COMMITED).build();
+ Future<RpcResult<TransactionStatus>> dummyFuture = DummyFuture.builder().rpcResult(rpcResult).build();
+ when(brokerFacade.commitOperationalDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
+ when(brokerFacade.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
+
+ String uri = createUri("/config/", "ietf-interfaces:interfaces/interface/eth0");
+ Response response = target(uri).request(MEDIA_TYPE).put(entity);
+ assertEquals(200, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE).post(entity);
+ assertEquals(204, response.getStatus());
+
+ uri = createUri("/operational/", "ietf-interfaces:interfaces/interface/eth0");
+ response = target(uri).request(MEDIA_TYPE).put(entity);
+ assertEquals(200, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE).post(entity);
+ assertEquals(204, response.getStatus());
+
+ uri = createUri("/datastore/", "ietf-interfaces:interfaces/interface/eth0");
+ response = target(uri).request(MEDIA_TYPE).put(entity);
+ assertEquals(200, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE).post(entity);
+ assertEquals(204, response.getStatus());
+ }
+
+ @Test
+ public void testRpcResultOtherToStatusCodes() throws UnsupportedEncodingException {
+ InputStream xmlStream = RestconfImplTest.class.getResourceAsStream("/parts/ietf-interfaces_interfaces.xml");
+ String xml = TestUtils.getDocumentInPrintableForm(TestUtils.loadDocumentFrom(xmlStream));
+ Entity<String> entity = Entity.entity(xml, MEDIA_TYPE);
+ RpcResult<TransactionStatus> rpcResult = DummyRpcResult.builder().result(TransactionStatus.FAILED).build();
+ Future<RpcResult<TransactionStatus>> dummyFuture = DummyFuture.builder().rpcResult(rpcResult).build();
+ when(brokerFacade.commitOperationalDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
+ when(brokerFacade.commitConfigurationDataPut(any(InstanceIdentifier.class), any(CompositeNode.class))).thenReturn(dummyFuture);
+
+ String uri = createUri("/config/", "ietf-interfaces:interfaces/interface/eth0");
+ Response response = target(uri).request(MEDIA_TYPE).put(entity);
+ assertEquals(500, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE).post(entity);
+ assertEquals(500, response.getStatus());
+
+ uri = createUri("/operational/", "ietf-interfaces:interfaces/interface/eth0");
+ response = target(uri).request(MEDIA_TYPE).put(entity);
+ assertEquals(500, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE).post(entity);
+ assertEquals(500, response.getStatus());
+
+ uri = createUri("/datastore/", "ietf-interfaces:interfaces/interface/eth0");
+ response = target(uri).request(MEDIA_TYPE).put(entity);
+ assertEquals(500, response.getStatus());
+ response = target(uri).request(MEDIA_TYPE).post(entity);
+ assertEquals(500, response.getStatus());
+ }
+
+ private String createUri(String prefix, String encodedPart) throws UnsupportedEncodingException {
+ return URI.create(prefix + URLEncoder.encode(encodedPart, Charsets.US_ASCII.name()).toString()).toASCIIString();
}
@Override
--- /dev/null
+{
+ "cont": {
+ "lf":[null]
+ }
+}
\ No newline at end of file
--- /dev/null
+{
+ "cont": {
+ "lflst1":[45,55,66]
+ }
+}
\ No newline at end of file
-module simple-data-types {
- namespace "simple:data:types";
+module simple-container-yang {
+ namespace "simple:container:yang";
prefix "smpdtp";
revision 2013-11-12 {
-module simple-data-types {
- namespace "simple:data:types";
+module simple-list-yang1 {
+ namespace "simple:list:yang1";
- prefix "smpdtp";
+ prefix "smplstyg";
revision 2013-11-12 {
}
--- /dev/null
+module simple-list-yang2 {
+ namespace "simple:list:yang2";
+
+ prefix "smplstyg";
+ revision 2013-11-12 {
+ }
+
+ list lst {
+ container cont1 {
+ }
+ list lst1 {
+ }
+ leaf-list lflst1 {
+ type string;
+ }
+ leaf lf1 {
+ type string;
+ }
+ }
+}
\ No newline at end of file
namespace "data:container:yang";
prefix "dtconyg";
- revision 2013-11-19 {
+ revision 2013-11-19 {
}
container cont {
leaf lf1 {
type string;
}
+
+ leaf lf2 {
+ type string;
+ }
+
+ leaf lf3 {
+ type empty;
+ }
+
leaf-list lflst1 {
type string;
}
<cont>
<lf1>str0</lf1>
+ <lf2></lf2>
+ <lf3/>
<lflst1>121</lflst1>
<lflst1>131</lflst1>
<lflst1>str1</lflst1>
revision 2013-11-12 {
}
+ typedef tpdfempty {
+ type empty;
+ }
+
+ typedef tpdfbit {
+ type bits {
+ bit b1;
+ bit b2;
+ bit b3;
+ }
+ }
+
+ typedef tpdfun4 {
+ type boolean;
+ }
+
+ typedef tpdfun3 {
+ type union {
+ type tpdfbit;
+ type tpdfempty;
+ }
+ }
+
+ typedef tpdfun2 {
+ type union {
+ type tpdfun3;
+ type tpdfun4;
+ }
+ }
+
+ typedef tpdfun1 {
+ type union {
+ type uint8;
+ type decimal64 {
+ fraction-digits 2;
+ }
+ }
+ }
+
container cont {
leaf lfnint8Min {
type int8;
type empty;
}
- leaf-list lflstunion {
+ leaf lfunion1 {
type union {
type uint16;
type string;
}
}
+ leaf lfunion2 {
+ type union {
+ type decimal64 {
+ fraction-digits 2;
+ }
+ type string;
+ }
+ }
+
+ leaf lfunion3 {
+ type union {
+ type empty;
+ type string;
+ }
+ }
+
+ leaf lfunion4 {
+ type union {
+ type boolean;
+ type string;
+ }
+ }
+
+ leaf lfunion5 {
+ type union {
+ type uint16;
+ type string;
+ }
+ }
+
+ leaf lfunion6 {
+ type union {
+ type uint16;
+ type empty;
+ }
+ }
+
+ leaf lfunion7 {
+ type tpdfun3;
+ }
+
+ leaf lfunion8 {
+ type union {
+ type uint16;
+ type string;
+ }
+ }
+
+ leaf lfunion9 {
+ type union {
+ type uint16;
+ type boolean;
+ }
+ }
+
+ leaf lfunion10 {
+ type union {
+ type bits {
+ bit bt1;
+ bit bt2;
+ }
+ type boolean;
+ }
+ }
+
+ leaf lfunion11 {
+ type union {
+ type tpdfun1;
+ type tpdfun2;
+ }
+ }
+
+ leaf lfunion12 {
+ type tpdfun2;
+ }
}
<lfbits>bit3</lfbits>
<lfbinary>AAaacdabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ%%-#^</lfbinary>
<lfempty></lfempty>
- <lflstunion>324</lflstunion>
- <lflstunion>33.3</lflstunion>
- <lflstunion>lfunion</lflstunion>
- <lflstunion>true</lflstunion>
+ <lfunion1>324</lfunion1>
+ <lfunion2>33.3</lfunion2>
+ <lfunion3>55</lfunion3>
+ <lfunion4>true</lfunion4>
+ <lfunion5>true</lfunion5>
+ <lfunion6>false</lfunion6>
+ <lfunion7></lfunion7>
+ <lfunion8></lfunion8>
+ <lfunion9></lfunion9>
+ <lfunion10>bt1</lfunion10>
+ <lfunion11>33</lfunion11>
+ <lfunion12>false</lfunion12>
</cont>
\ No newline at end of file
<activeByDefault>false</activeByDefault>
</activation>
<modules>
- <module>toaster-it</module>
+ <!--module>toaster-it</module -->
</modules>
</profile>
</profiles>
GroupFeaturesBuilder groupFeatures = new GroupFeaturesBuilder();
groupFeatures.setActions(notification.getActions());
groupFeatures.setCapabilities(notification.getCapabilities());
- groupFeatures.setGroupType(notification.getGroupType());
+ groupFeatures.setTypes(notification.getTypes());
groupFeatures.setMaxGroups(notification.getMaxGroups());
cache.get(notification.getId()).setGroupFeatures(groupFeatures.build());
*
* @param routeId route identifier
* @param route network address
+ * @throws RoutingTableException for any logical exception
+     * @throws SystemException for any system-level failure
*/
- public void addRoute(I routeId, R route) throws SystemException, RoutingTableException;
+ public void addRoute(I routeId, R route) throws RoutingTableException,SystemException;
- /**
+ /**
* Adds a network address for the route. If the route already exists,
* it throws <code>DuplicateRouteException</code>.
* This method would be used when registering a global service.
* @param routeId route identifier
* @param route network address
* @throws DuplicateRouteException
+ * @throws RoutingTableException
*/
public void addGlobalRoute(I routeId, R route) throws RoutingTableException, SystemException;
* Remove the route.
* This method would be used when registering a global service.
* @param routeId
+ * @throws RoutingTableException
+ * @throws SystemException
*/
- public void removeGlobalRoute(I routeId);
+ public void removeGlobalRoute(I routeId) throws RoutingTableException, SystemException;
/**
* Returns a set of network addresses associated with this route
*/
public R getARoute(I routeId);
+ /**
+ *
+ * This will be removed after listeners
+ * have made change on their end to use whiteboard pattern
+ * @deprecated
+ */
+
public void registerRouteChangeListener(RouteChangeListener listener);
public class DuplicateRouteException extends RoutingTableException {
import org.apache.felix.dm.Component;
import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
import org.opendaylight.controller.clustering.services.IClusterGlobalServices;
+import org.opendaylight.controller.sal.connector.remoterpc.api.RouteChangeListener;
import org.opendaylight.controller.sal.connector.remoterpc.api.RoutingTable;
import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
import org.slf4j.Logger;
c.setInterface(new String[] { RoutingTable.class.getName(),ICacheUpdateAware.class.getName() }, props);
logger.debug("configureGlobalInstance adding dependency:", IClusterGlobalServices.class);
+
+ // RouteChangeListener services will be none or many so the
+ // dependency is optional
+ c.add(createServiceDependency()
+ .setService(RouteChangeListener.class)
+ .setCallbacks("setRouteChangeListener", "unsetRouteChangeListener")
+ .setRequired(false));
+
+ //dependency is required as it provides us the caching support
c.add(createServiceDependency().setService(
IClusterGlobalServices.class).setCallbacks(
"setClusterGlobalServices",
import com.google.common.base.Preconditions;
import org.apache.felix.dm.Component;
-import org.opendaylight.controller.clustering.services.*;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.CacheListenerAddException;
+import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
+import org.opendaylight.controller.clustering.services.IClusterGlobalServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
import org.opendaylight.controller.sal.connector.remoterpc.api.RouteChangeListener;
import org.opendaylight.controller.sal.connector.remoterpc.api.RoutingTable;
import org.opendaylight.controller.sal.connector.remoterpc.api.RoutingTableException;
import org.opendaylight.controller.sal.connector.remoterpc.api.SystemException;
-import org.osgi.framework.ServiceRegistration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.transaction.HeuristicRollbackException;
import javax.transaction.NotSupportedException;
import javax.transaction.RollbackException;
-import java.util.*;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
import java.util.concurrent.ConcurrentMap;
/**
* @author: syedbahm
*/
-public class RoutingTableImpl<I, R> implements RoutingTable<I, R>,ICacheUpdateAware<I,R> {
+public class RoutingTableImpl<I, R> implements RoutingTable<I, R>, ICacheUpdateAware<I, R> {
public static final String ROUTING_TABLE_GLOBAL_CACHE = "routing_table_global_cache";
- private Logger log = LoggerFactory
- .getLogger(RoutingTableImpl.class);
-
- private IClusterGlobalServices clusterGlobalServices = null;
- private RoutingTableImpl routingTableInstance = null;
- private ConcurrentMap routingTableCache = null;
- private List<RouteChangeListener> routeChangeListeners = new ArrayList<RouteChangeListener>();
- private ServiceRegistration cacheAwareRegistration = null;
-
- public RoutingTableImpl() {
- }
-
- @Override
- public void addRoute(I routeId, R route) throws RoutingTableException {
- throw new UnsupportedOperationException(" Not implemented yet!");
- }
-
- @Override
- public void addGlobalRoute(I routeId, R route) throws RoutingTableException, SystemException {
- Preconditions.checkNotNull(routeId, "addGlobalRoute: routeId cannot be null!");
- Preconditions.checkNotNull(route, "addGlobalRoute: route cannot be null!");
- try {
-
- Set<R> existingRoute = null;
- // ok does the global route is already registered ?
- if ((existingRoute = getRoutes(routeId)) == null) {
-
- if(log.isDebugEnabled()){
- log.debug("addGlobalRoute: adding a new route with id"+ routeId + " and value = "+route);
- }
- // lets start a transaction
- clusterGlobalServices.tbegin();
- Set<R> routes = new HashSet<R>();
- routes.add(route);
- routingTableCache.put(routeId, routes);
- clusterGlobalServices.tcommit();
- } else {
- throw new DuplicateRouteException(" There is already existing route " + existingRoute);
- }
-
- } catch (NotSupportedException e) {
- throw new RoutingTableException("Transaction error - while trying to create route id=" + routeId + "with route" + route, e);
- } catch (HeuristicRollbackException e) {
- throw new RoutingTableException("Transaction error - while trying to create route id=" + routeId + "with route" + route, e);
- } catch (RollbackException e) {
- throw new RoutingTableException("Transaction error - while trying to create route id=" + routeId + "with route" + route, e);
- } catch (HeuristicMixedException e) {
- throw new RoutingTableException("Transaction error - while trying to create route id=" + routeId + "with route" + route, e);
- } catch (javax.transaction.SystemException e){
- throw new SystemException ( "System error occurred - while trying to create with value",e);
+ private Logger log = LoggerFactory.getLogger(RoutingTableImpl.class);
+
+ private IClusterGlobalServices clusterGlobalServices = null;
+ private RoutingTableImpl routingTableInstance = null;
+ private ConcurrentMap routingTableCache = null;
+ private Set<RouteChangeListener> routeChangeListeners = Collections
+ .synchronizedSet(new HashSet<RouteChangeListener>());
+
+ public RoutingTableImpl() {
}
- }
+ @Override
+ public void addRoute(I routeId, R route) throws RoutingTableException {
+ throw new UnsupportedOperationException(" Not implemented yet!");
+ }
- @Override
- public void removeRoute(I routeId, R route) {
- throw new UnsupportedOperationException("Not implemented yet!");
- }
@Override
- public void removeGlobalRoute(I routeId) {
- routingTableCache.remove(routeId);
+ public void addGlobalRoute(I routeId, R route) throws RoutingTableException, SystemException {
+ Preconditions.checkNotNull(routeId, "addGlobalRoute: routeId cannot be null!");
+ Preconditions.checkNotNull(route, "addGlobalRoute: route cannot be null!");
+ try {
+
+ Set<R> existingRoute = null;
+ // ok does the global route is already registered ?
+ if ((existingRoute = getRoutes(routeId)) == null) {
+
+ if (log.isDebugEnabled()) {
+ log.debug("addGlobalRoute: adding a new route with id" + routeId + " and value = "
+ + route);
+ }
+ // lets start a transaction
+ clusterGlobalServices.tbegin();
+ Set<R> routes = new HashSet<R>();
+ routes.add(route);
+ routingTableCache.put(routeId, routes);
+ clusterGlobalServices.tcommit();
+ } else {
+ throw new DuplicateRouteException(" There is already existing route " + existingRoute);
+ }
+
+ } catch (NotSupportedException e) {
+ throw new RoutingTableException("Transaction error - while trying to create route id="
+ + routeId + "with route" + route, e);
+ } catch (HeuristicRollbackException e) {
+ throw new RoutingTableException("Transaction error - while trying to create route id="
+ + routeId + "with route" + route, e);
+ } catch (RollbackException e) {
+ throw new RoutingTableException("Transaction error - while trying to create route id="
+ + routeId + "with route" + route, e);
+ } catch (HeuristicMixedException e) {
+ throw new RoutingTableException("Transaction error - while trying to create route id="
+ + routeId + "with route" + route, e);
+ } catch (javax.transaction.SystemException e) {
+ throw new SystemException("System error occurred - while trying to create with value", e);
+ }
+
}
- @Override
- public Set<R> getRoutes(I routeId) {
+ @Override
+ public void removeRoute(I routeId, R route) {
+ throw new UnsupportedOperationException("Not implemented yet!");
+ }
- //Note: currently works for global routes only wherein there is just single route
- Preconditions.checkNotNull(routeId, "getARoute: routeId cannot be null!");
- return (Set<R>) routingTableCache.get(routeId);
- }
+ @Override
+ public void removeGlobalRoute(I routeId) throws RoutingTableException, SystemException {
+ Preconditions.checkNotNull(routeId, "removeGlobalRoute: routeId cannot be null!");
+ try {
+ if (log.isDebugEnabled()) {
+ log.debug("removeGlobalRoute: removing a new route with id" + routeId);
+ }
+ // lets start a transaction
+ clusterGlobalServices.tbegin();
+
+ routingTableCache.remove(routeId);
+ clusterGlobalServices.tcommit();
+
+ } catch (NotSupportedException e) {
+ throw new RoutingTableException("Transaction error - while trying to remove route id="
+ + routeId, e);
+ } catch (HeuristicRollbackException e) {
+ throw new RoutingTableException("Transaction error - while trying to remove route id="
+ + routeId, e);
+ } catch (RollbackException e) {
+ throw new RoutingTableException("Transaction error - while trying to remove route id="
+ + routeId, e);
+ } catch (HeuristicMixedException e) {
+ throw new RoutingTableException("Transaction error - while trying to remove route id="
+ + routeId, e);
+ } catch (javax.transaction.SystemException e) {
+ throw new SystemException("System error occurred - while trying to remove with value", e);
+ }
+ }
- @Override
- public R getARoute(I routeId) {
- throw new UnsupportedOperationException("Not implemented yet!");
- }
+ @Override
+ public Set<R> getRoutes(I routeId) {
- /**
- * Registers listener for sending any change notification
- *
- * @param listener
- */
- @Override
- public void registerRouteChangeListener(RouteChangeListener listener) {
- routeChangeListeners.add(listener);
- }
+ // Note: currently works for global routes only wherein there is just single
+ // route
+ Preconditions.checkNotNull(routeId, "getARoute: routeId cannot be null!");
+ return (Set<R>) routingTableCache.get(routeId);
+ }
+ @Override
+ public R getARoute(I routeId) {
+ throw new UnsupportedOperationException("Not implemented yet!");
+ }
/**
- * Returning the list of route change listeners for Unit testing
- * Note: the package scope is default
- * @return List of registered RouteChangeListener<I,R> listeners
+ * @deprecated doesn't do anything will be removed once listeners used
+ * whiteboard pattern Registers listener for sending any change
+ * notification
+ * @param listener
*/
- List<RouteChangeListener> getRegisteredRouteChangeListeners(){
- return routeChangeListeners;
- }
+ @Override
+ public void registerRouteChangeListener(RouteChangeListener listener) {
- public void setClusterGlobalServices(IClusterGlobalServices clusterGlobalServices) {
- this.clusterGlobalServices = clusterGlobalServices;
- }
+ }
- public void unsetClusterGlobalServices(IClusterGlobalServices clusterGlobalServices) {
- if(cacheAwareRegistration != null) {
- cacheAwareRegistration.unregister();
+ public void setRouteChangeListener(RouteChangeListener rcl) {
+ if(rcl != null){
+ routeChangeListeners.add(rcl);
+ }else{
+ log.warn("setRouteChangeListener called with null listener");
+ }
+ }
+
+ public void unSetRouteChangeListener(RouteChangeListener rcl) {
+ if(rcl != null){
+ routeChangeListeners.remove(rcl);
+ }else{
+ log.warn("unSetRouteChangeListener called with null listener");
+ }
}
- this.clusterGlobalServices = null;
- }
/**
- * Creates the Routing Table clustered global services cache
- * @throws CacheExistException -- cluster global services exception when cache exist
- * @throws CacheConfigException -- cluster global services exception during cache config
- * @throws CacheListenerAddException -- cluster global services exception during adding of listener
+ * Returning the set of route change listeners for Unit testing Note: the
+ * package scope is default
+ *
+ * @return List of registered RouteChangeListener<I,R> listeners
*/
+ Set<RouteChangeListener> getRegisteredRouteChangeListeners() {
+ return routeChangeListeners;
+ }
- void createRoutingTableCache() throws CacheExistException, CacheConfigException, CacheListenerAddException {
- // TBD: HOW DO WE DECIDE ON PROPERTIES OF THE CACHE i.e. what duration it
- // should be caching?
-
- // let us check here if the cache already exists -- if so don't create
- if (!clusterGlobalServices.existCache(
- ROUTING_TABLE_GLOBAL_CACHE)) {
+ public void setClusterGlobalServices(IClusterGlobalServices clusterGlobalServices) {
+ this.clusterGlobalServices = clusterGlobalServices;
+ }
- if(log.isDebugEnabled()){
- log.debug("createRoutingTableCache: creating a new routing table cache "+ROUTING_TABLE_GLOBAL_CACHE );
- }
- routingTableCache = clusterGlobalServices.createCache(
- ROUTING_TABLE_GLOBAL_CACHE, EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
- } else {
- if(log.isDebugEnabled()){
- log.debug("createRoutingTableCache: found existing routing table cache "+ROUTING_TABLE_GLOBAL_CACHE );
+ public void unsetClusterGlobalServices(IClusterGlobalServices clusterGlobalServices) {
+ if((clusterGlobalServices != null ) && (this.clusterGlobalServices.equals(clusterGlobalServices))){
+ this.clusterGlobalServices = null;
}
- routingTableCache = clusterGlobalServices.getCache(
- ROUTING_TABLE_GLOBAL_CACHE);
}
- }
-
- /**
- * Function called by the dependency manager when all the required
- * dependencies are satisfied
- *
- */
- void init(Component c) {
- try {
-
- createRoutingTableCache();
- } catch (CacheExistException e) {
- throw new IllegalStateException("could not construct routing table cache");
- } catch (CacheConfigException e) {
- throw new IllegalStateException("could not construct routing table cache");
- } catch (CacheListenerAddException e) {
- throw new IllegalStateException("could not construct routing table cache");
+ /**
+ * Creates the Routing Table clustered global services cache
+ *
+ * @throws CacheExistException
+ * -- cluster global services exception when cache exist
+ * @throws CacheConfigException
+ * -- cluster global services exception during cache config
+ * @throws CacheListenerAddException
+ * -- cluster global services exception during adding of listener
+ */
+
+ void createRoutingTableCache() throws CacheExistException, CacheConfigException,
+ CacheListenerAddException {
+ // TBD: HOW DO WE DECIDE ON PROPERTIES OF THE CACHE i.e. what duration it
+ // should be caching?
+
+ // let us check here if the cache already exists -- if so don't create
+ if (!clusterGlobalServices.existCache(ROUTING_TABLE_GLOBAL_CACHE)) {
+
+ if (log.isDebugEnabled()) {
+ log.debug("createRoutingTableCache: creating a new routing table cache "
+ + ROUTING_TABLE_GLOBAL_CACHE);
+ }
+ routingTableCache = clusterGlobalServices.createCache(ROUTING_TABLE_GLOBAL_CACHE,
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ } else {
+ if (log.isDebugEnabled()) {
+ log.debug("createRoutingTableCache: found existing routing table cache "
+ + ROUTING_TABLE_GLOBAL_CACHE);
+ }
+ routingTableCache = clusterGlobalServices.getCache(ROUTING_TABLE_GLOBAL_CACHE);
+ }
+
}
- }
+ /**
+ * Function called by the dependency manager when all the required
+ * dependencies are satisfied
+ *
+ */
+ void init(Component c) {
+ try {
+
+ createRoutingTableCache();
+ } catch (CacheExistException e) {
+ throw new IllegalStateException("could not construct routing table cache");
+ } catch (CacheConfigException e) {
+ throw new IllegalStateException("could not construct routing table cache");
+ } catch (CacheListenerAddException e) {
+ throw new IllegalStateException("could not construct routing table cache");
+ }
+ }
/**
- * Get routing table method is useful for unit testing
- * <note>It has package scope</note>
+ * Get routing table method is useful for unit testing <note>It has package
+ * scope</note>
*/
- ConcurrentMap getRoutingTableCache(){
+ ConcurrentMap getRoutingTableCache() {
return this.routingTableCache;
}
+ /**
+ * Invoked when a new entry is available in the cache, the key is only
+ * provided, the value will come as an entryUpdate invocation
+ *
+ * @param key
+ * Key for the entry just created
+ * @param cacheName
+ * name of the cache for which update has been received
+ * @param originLocal
+ * true if the event is generated from this node
+ */
+ @Override
+ public void entryCreated(I key, String cacheName, boolean originLocal) {
+ // TBD: do we require this.
+ if (log.isDebugEnabled()) {
+ log.debug("RoutingTableUpdates: entryCreated routeId = " + key + " cacheName=" + cacheName);
+ }
+ }
- /**
- * Invoked when a new entry is available in the cache, the key is
- * only provided, the value will come as an entryUpdate invocation
- *
- * @param key Key for the entry just created
- * @param cacheName name of the cache for which update has been
- * received
- * @param originLocal true if the event is generated from this
- * node
- */
- @Override
- public void entryCreated(I key, String cacheName, boolean originLocal) {
- //TBD: do we require this.
- if(log.isDebugEnabled()){
- log.debug("RoutingTableUpdates: entryCreated routeId = "+key
- + " cacheName="+cacheName
- );
- }
- }
-
- /**
- * Called anytime a given entry is updated
- *
- * @param key Key for the entry modified
- * @param new_value the new value the key will have
- * @param cacheName name of the cache for which update has been
- * received
- * @param originLocal true if the event is generated from this
- * node
- */
- @Override
- public void entryUpdated(I key, R new_value, String cacheName, boolean originLocal) {
- if(log.isDebugEnabled()){
- log.debug("RoutingTableUpdates: entryUpdated routeId = "+key
- + ",value = "+ new_value
- + " ,cacheName="+cacheName
- );
- }
- for(RouteChangeListener rcl:routeChangeListeners){
- rcl.onRouteUpdated(key, new_value);
- }
- }
-
- /**
- * Called anytime a given key is removed from the
- * ConcurrentHashMap we are listening to.
- *
- * @param key Key of the entry removed
- * @param cacheName name of the cache for which update has been
- * received
- * @param originLocal true if the event is generated from this
- * node
- */
- @Override
- public void entryDeleted(I key, String cacheName, boolean originLocal) {
- if(log.isDebugEnabled()){
- log.debug("RoutingTableUpdates: entryUpdated routeId = "+key
- + " local = "+ originLocal
- + " cacheName="+cacheName
- );
- }
- for(RouteChangeListener rcl:routeChangeListeners){
- rcl.onRouteDeleted(key);
- }
- }
- }
\ No newline at end of file
+ /**
+ * Called anytime a given entry is updated
+ *
+ * @param key
+ * Key for the entry modified
+ * @param new_value
+ * the new value the key will have
+ * @param cacheName
+ * name of the cache for which update has been received
+ * @param originLocal
+ * true if the event is generated from this node
+ */
+ @Override
+ public void entryUpdated(I key, R new_value, String cacheName, boolean originLocal) {
+ if (log.isDebugEnabled()) {
+ log.debug("RoutingTableUpdates: entryUpdated routeId = " + key + ",value = " + new_value
+ + " ,cacheName=" + cacheName + " originLocal="+originLocal);
+ }
+ if (!originLocal) {
+ for (RouteChangeListener rcl : routeChangeListeners) {
+ rcl.onRouteUpdated(key, new_value);
+ }
+ }
+ }
+
+ /**
+ * Called anytime a given key is removed from the ConcurrentHashMap we are
+ * listening to.
+ *
+ * @param key
+ * Key of the entry removed
+ * @param cacheName
+ * name of the cache for which update has been received
+ * @param originLocal
+ * true if the event is generated from this node
+ */
+ @Override
+ public void entryDeleted(I key, String cacheName, boolean originLocal) {
+ if (log.isDebugEnabled()) {
+ log.debug("RoutingTableUpdates: entryUpdated routeId = " + key + " local = " + originLocal
+ + " cacheName=" + cacheName + " originLocal="+originLocal);
+ }
+ if (!originLocal) {
+ for (RouteChangeListener rcl : routeChangeListeners) {
+ rcl.onRouteDeleted(key);
+ }
+ }
+ }
+}
\ No newline at end of file
Assert.assertEquals(rti.getRegisteredRouteChangeListeners().size(),0);
rti.registerRouteChangeListener(new RouteChangeListenerImpl());
- Assert.assertEquals(rti.getRegisteredRouteChangeListeners().size(),1);
+ Assert.assertEquals(rti.getRegisteredRouteChangeListeners().size(),0); //old should not work
+ //what about the new approach - using whiteboard pattern
+ rti.setRouteChangeListener(new RouteChangeListenerImpl());
+
+ Assert.assertEquals(rti.getRegisteredRouteChangeListeners().size(),1); //should not work
+
}
@Test
import java.net.URI;
import java.util.Set;
-
import static org.ops4j.pax.exam.CoreOptions.junitBundles;
import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
import static org.ops4j.pax.exam.CoreOptions.options;
public static final String YANG = "org.opendaylight.yangtools";
public static final String CONTROLLER = "org.opendaylight.controller";
public static final String YANGTOOLS = "org.opendaylight.yangtools";
+ RoutingIdentifierImpl rii = new RoutingIdentifierImpl();
// get the OSGI bundle context
@Inject
private BundleContext bc;
mavenBundle(YANGTOOLS + ".thirdparty", "antlr4-runtime-osgi-nohead").versionAsInProject(), //
- mavenBundle(YANG, "concepts").versionAsInProject(),
- mavenBundle(YANG, "yang-binding").versionAsInProject(), //
- mavenBundle(YANG, "yang-common").versionAsInProject(), //
mavenBundle(YANG+".thirdparty", "xtend-lib-osgi").versionAsInProject(),
mavenBundle("com.google.guava", "guava").versionAsInProject(), //
mavenBundle("org.javassist", "javassist").versionAsInProject(),
@Test
public void testAddGlobalRoute () throws Exception{
- RoutingIdentifierImpl rii = new RoutingIdentifierImpl();
+
routingTable.addGlobalRoute(rii,"172.27.12.1:5000");
Set<String> routes = routingTable.getRoutes(rii);
}
+ @Test
+ public void testDeleteGlobalRoute () throws Exception{
+
+ routingTable.removeGlobalRoute(rii);
+
+ Set<String> routes = routingTable.getRoutes(rii);
+
+ Assert.assertNull(routes);
+
+
+ }
+
+
+
class RoutingIdentifierImpl implements RpcRouter.RouteIdentifier,Serializable {
private final URI namespace = URI.create("http://cisco.com/example");
public org.opendaylight.yangtools.yang.data.api.InstanceIdentifier getRoute() {
return InstanceIdentifier.of(instance);
}
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RoutingIdentifierImpl that = (RoutingIdentifierImpl) o;
+
+ if (QNAME != null ? !QNAME.equals(that.QNAME) : that.QNAME != null) return false;
+ if (instance != null ? !instance.equals(that.instance) : that.instance != null) return false;
+ if (namespace != null ? !namespace.equals(that.namespace) : that.namespace != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = namespace != null ? namespace.hashCode() : 0;
+ result = 31 * result + (QNAME != null ? QNAME.hashCode() : 0);
+ result = 31 * result + (instance != null ? instance.hashCode() : 0);
+ return result;
+ }
}
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import java.util.List;
+import java.util.Map;
public class ObjectNameAttributeReadingStrategy extends AbstractAttributeReadingStrategy<AttributeIfc> {
+ private static final Object PREFIX_SEPARATOR = ":";
+
public ObjectNameAttributeReadingStrategy(DependencyAttribute attributeIfc) {
super(attributeIfc);
}
private ObjectNameAttributeMappingStrategy.MappedDependency resolve(XmlElement firstChild) {
XmlElement typeElement = firstChild.getOnlyChildElementWithSameNamespace(XmlNetconfConstants.TYPE_KEY);
- String serviceName = typeElement.getTextContent();
+ Map.Entry<String, String> prefixNamespace = typeElement.findNamespaceOfTextContent();
+
+ String serviceName = checkPrefixAndExtractServiceName(typeElement, prefixNamespace);
+
XmlElement nameElement = firstChild.getOnlyChildElementWithSameNamespace(XmlNetconfConstants.NAME_KEY);
String dependencyName = nameElement.getTextContent();
- return new ObjectNameAttributeMappingStrategy.MappedDependency(serviceName, dependencyName);
+ return new ObjectNameAttributeMappingStrategy.MappedDependency(prefixNamespace.getValue(), serviceName,
+ dependencyName);
+ }
+
+ public static String checkPrefixAndExtractServiceName(XmlElement typeElement, Map.Entry<String, String> prefixNamespace) {
+ String serviceName = typeElement.getTextContent();
+
+ Preconditions.checkState(prefixNamespace.equals("") == false, "Service %s value not prefixed with namespace",
+ XmlNetconfConstants.TYPE_KEY);
+ String prefix = prefixNamespace.getKey() + PREFIX_SEPARATOR;
+ Preconditions.checkState(serviceName.startsWith(prefix),
+ "Service %s not correctly prefixed, expected %s, but was %s", XmlNetconfConstants.TYPE_KEY, prefix,
+ serviceName);
+ serviceName = serviceName.substring(prefix.length());
+ return serviceName;
}
}
protected AttributeMappingStrategy<?, ? extends OpenType<?>> caseDependencyAttribute(
DependencyAttribute attributeIfc) {
String serviceName = attributeIfc.getDependency().getSie().getQName().getLocalName();
+ String namespace = attributeIfc.getDependency().getSie().getQName().getNamespace().toString();
return new ObjectNameAttributeMappingStrategy((SimpleType<?>) attributeIfc.getOpenType(), dependencyTracker,
- serviceName);
+ serviceName, namespace);
}
@Override
private final Services tracker;
private final String serviceName;
+ private final String namespace;
- public ObjectNameAttributeMappingStrategy(SimpleType<?> openType, Services dependencyTracker, String serviceName) {
+ public ObjectNameAttributeMappingStrategy(SimpleType<?> openType, Services dependencyTracker, String serviceName, String namespace) {
super(openType);
this.tracker = dependencyTracker;
this.serviceName = serviceName;
+ this.namespace = namespace;
}
@Override
Util.checkType(value, ObjectName.class);
ObjectName on = (ObjectName) value;
- String refName = tracker.addServiceEntry(serviceName, on);
+ String refName = tracker.addServiceEntry(namespace, serviceName, on);
- return Optional.of(new MappedDependency(serviceName, refName));
+ return Optional.of(new MappedDependency(namespace, serviceName, refName));
}
public static class MappedDependency {
- private final String serviceName, refName;
+ private final String namespace, serviceName, refName;
- public MappedDependency(String serviceName, String refName) {
+ public MappedDependency(String namespace, String serviceName, String refName) {
this.serviceName = serviceName;
this.refName = refName;
+ this.namespace = namespace;
}
public String getServiceName() {
return refName;
}
+ public String getNamespace() {
+ return namespace;
+ }
+
@Override
public String toString() {
final StringBuffer sb = new StringBuffer("MappedDependency{");
- sb.append("serviceName='").append(serviceName).append('\'');
+ sb.append("namespace='").append(namespace).append('\'');
+ sb.append(", serviceName='").append(serviceName).append('\'');
sb.append(", refName='").append(refName).append('\'');
sb.append('}');
return sb.toString();
Util.checkType(value, ObjectNameAttributeMappingStrategy.MappedDependency.class);
ObjectNameAttributeMappingStrategy.MappedDependency mappedDep = (ObjectNameAttributeMappingStrategy.MappedDependency) value;
- ServiceInstance byRefName = serviceTracker.getByServiceAndRefName(mappedDep.getServiceName(),
- mappedDep.getRefName());
+ String serviceName = mappedDep.getServiceName();
+ String refName = mappedDep.getRefName();
+ String namespace = mappedDep.getNamespace();
+ logger.trace("Getting service instance by service name {} : {} and ref name {}", namespace, serviceName, refName);
+
+ ServiceInstance byRefName = serviceTracker.getByServiceAndRefName(namespace, serviceName, refName);
ObjectName on = ObjectNameUtil.createReadOnlyModuleON(byRefName.getModuleName(), byRefName.getInstanceName());
logger.debug("Attribute {} : {} parsed to type {}", attrName, value, getOpenType());
return Optional.of(on);
String moduleName = ((ObjectNameAttributeMappingStrategy.MappedDependency) value).getServiceName();
String refName = ((ObjectNameAttributeMappingStrategy.MappedDependency) value).getRefName();
+ String namespaceForType = ((ObjectNameAttributeMappingStrategy.MappedDependency) value).getNamespace();
- final Element typeElement = XmlUtil.createTextElement(document, XmlNetconfConstants.TYPE_KEY, moduleName);
+ Element typeElement = XmlUtil.createPrefixedTextElement(document, XmlNetconfConstants.TYPE_KEY, XmlNetconfConstants.PREFIX,
+ moduleName);
+ XmlUtil.addPrefixedNamespaceAttr(typeElement, XmlNetconfConstants.PREFIX, namespaceForType);
innerNode.appendChild(typeElement);
final Element nameElement = XmlUtil.createTextElement(document, XmlNetconfConstants.NAME_KEY, refName);
package org.opendaylight.controller.netconf.confignetconfconnector.mapping.config;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditStrategyType;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import javax.management.ObjectName;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
import static com.google.common.base.Preconditions.checkState;
import static java.lang.String.format;
public class Config {
+ private final Logger logger = LoggerFactory.getLogger(Config.class);
- private final Map<String, Map<String, ModuleConfig>> moduleConfigs;
+ private final Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleConfig>> moduleConfigs;
+ private final Map<String, ModuleConfig> moduleNamesToConfigs;
public Config(Map<String, Map<String, ModuleConfig>> moduleConfigs) {
this.moduleConfigs = moduleConfigs;
+ Map<String, ModuleConfig> moduleNamesToConfigs = new HashMap<>();
+ for (Entry<String, Map<String, ModuleConfig>> entry : moduleConfigs.entrySet()) {
+ moduleNamesToConfigs.putAll(entry.getValue());
+ }
+ this.moduleNamesToConfigs = Collections.unmodifiableMap(moduleNamesToConfigs);
}
private Map<String, Map<String, Collection<ObjectName>>> getMappedInstances(Set<ObjectName> instancesToMap,
// All found instances add to service tracker in advance
// This way all instances will be serialized as all available
// services when get-config is triggered
- // (even if they are not used as services by other onstances)
+ // (even if they are not used as services by other instances)
// = more user friendly
addServices(serviceTracker, instances, mbeEntry.getValue().getProvidedServices());
}
private void addServices(Services serviceTracker, Collection<ObjectName> instances,
- Collection<String> providedServices) {
+ Multimap<String, String> providedServices) {
for (ObjectName instanceOn : instances) {
- for (String serviceName : providedServices) {
- serviceTracker.addServiceEntry(serviceName, instanceOn);
+ for (Entry<String, String> serviceName : providedServices.entries()) {
+ serviceTracker.addServiceEntry(serviceName.getKey(), serviceName.getValue(), instanceOn);
}
}
}
return root;
}
+ // TODO remove commented modules from output
private void addEmptyModulesCommented(Document document, Element root, String moduleNamespace,
Entry<String, Collection<ObjectName>> moduleMappingEntry) {
Element emptyModule = document.createElement(XmlNetconfConstants.MODULE_KEY);
// TODO refactor, replace string representing namespace with namespace class
// TODO refactor, replace Map->Multimap with e.g. ConfigElementResolved
// class
- public Map<String, Multimap<String, ModuleElementResolved>> fromXml(XmlElement xml) {
+ public Map<String, Multimap<String, ModuleElementResolved>> fromXml(XmlElement xml, Set<ObjectName> instancesForFillingServiceRefMapping,
+ EditStrategyType defaultEditStrategyType) {
Map<String, Multimap<String, ModuleElementResolved>> retVal = Maps.newHashMap();
List<XmlElement> recognisedChildren = Lists.newArrayList();
- Services serviceTracker = fromXmlServices(xml, recognisedChildren);
+ Services serviceTracker = fromXmlServices(xml, recognisedChildren, instancesForFillingServiceRefMapping);
List<XmlElement> moduleElements = fromXmlModules(xml, recognisedChildren);
xml.checkUnrecognisedElements(recognisedChildren);
for (XmlElement moduleElement : moduleElements) {
- resolveModule(retVal, serviceTracker, moduleElement);
+ resolveModule(retVal, serviceTracker, moduleElement, defaultEditStrategyType);
}
return retVal;
}
private void resolveModule(Map<String, Multimap<String, ModuleElementResolved>> retVal, Services serviceTracker,
- XmlElement moduleElement) {
+ XmlElement moduleElement, EditStrategyType defaultStrategy) {
XmlElement typeElement = moduleElement.getOnlyChildElementWithSameNamespace(XmlNetconfConstants.TYPE_KEY);
Entry<String, String> prefixToNamespace = typeElement.findNamespaceOfTextContent();
String moduleNamespace = prefixToNamespace.getValue();
}
ModuleElementResolved moduleElementResolved = moduleMapping.fromXml(moduleElement, serviceTracker,
- instanceName, moduleNamespace);
+ instanceName, moduleNamespace, defaultStrategy);
innerMap.put(factoryName, moduleElementResolved);
}
- private Services fromXmlServices(XmlElement xml, List<XmlElement> recognisedChildren) {
+ private Services fromXmlServices(XmlElement xml, List<XmlElement> recognisedChildren, Set<ObjectName> instancesForFillingServiceRefMapping) {
Optional<XmlElement> servicesElement = xml.getOnlyChildElementOptionally(XmlNetconfConstants.SERVICES_KEY,
XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG);
- Map<String, Map<String, String>> mappedServices;
+ Map<String, Map<String, Map<String, String>>> mappedServices;
if (servicesElement.isPresent()) {
mappedServices = Services.fromXml(servicesElement.get());
recognisedChildren.add(servicesElement.get());
} else {
mappedServices = new HashMap<>();
}
+ Services services = Services.resolveServices(mappedServices);
+ // merge with what candidate db contains by default - ref_
+
+ for(ObjectName existingON: instancesForFillingServiceRefMapping) {
+ logger.trace("Filling services from {}", existingON);
+ // get all its services
+ String factoryName = ObjectNameUtil.getFactoryName(existingON);
+ ModuleConfig moduleConfig = moduleNamesToConfigs.get(factoryName);
+
+ checkState(moduleConfig != null, "Cannot find ModuleConfig with name " + factoryName + " in " + moduleNamesToConfigs);
+ // Set<String> services = ;
+ for (Entry<String, String> serviceName : moduleConfig.getProvidedServices().entries()) {
+
+ services.addServiceEntry(serviceName.getKey(), serviceName.getValue(), existingON);
+ }
+ }
- return Services.resolveServices(mappedServices);
+ return services;
}
private String getFactoryName(String factoryNameWithPrefix, String prefixOrEmptyString) {
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.attributes.resolving.ObjectResolver;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.attributes.toxml.AttributeWritingStrategy;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.attributes.toxml.ObjectXmlWriter;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditStrategyType;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.slf4j.Logger;
depTracker).prepareResolving(yangToAttrConfig);
for (Entry<String, AttributeConfigElement> configDefEntry : mappedConfig.getConfiguration().entrySet()) {
+ AttributeConfigElement value = configDefEntry.getValue();
+ String attributeName = configDefEntry.getKey();
try {
-
AttributeResolvingStrategy<?, ? extends OpenType<?>> attributeResolvingStrategy = resolvingStrategies
- .get(configDefEntry.getKey());
+ .get(attributeName);
+ logger.trace("Trying to set value {} of attribute {} with {}", value, attributeName, attributeResolvingStrategy);
- configDefEntry.getValue().resolveValue(attributeResolvingStrategy, configDefEntry.getKey());
- configDefEntry.getValue().setJmxName(
- yangToAttrConfig.get(configDefEntry.getKey()).getUpperCaseCammelCase());
+ value.resolveValue(attributeResolvingStrategy, attributeName);
+ value.setJmxName(
+ yangToAttrConfig.get(attributeName).getUpperCaseCammelCase());
} catch (Exception e) {
- throw new IllegalStateException("Unable to resolve value " + configDefEntry.getValue()
- + " to attribute " + configDefEntry.getKey(), e);
+ throw new IllegalStateException("Unable to resolve value " + value
+ + " to attribute " + attributeName, e);
}
}
}
- public InstanceConfigElementResolved fromXml(XmlElement moduleElement, Services services, String moduleNamespace) {
+ public InstanceConfigElementResolved fromXml(XmlElement moduleElement, Services services, String moduleNamespace,
+ EditStrategyType defaultStrategy) {
Map<String, AttributeConfigElement> retVal = Maps.newHashMap();
Map<String, AttributeReadingStrategy> strats = new ObjectXmlReader().prepareReading(yangToAttrConfig);
XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
InstanceConfigElementResolved instanceConfigElementResolved = perInstanceEditStrategy.equals("") ? new InstanceConfigElementResolved(
- retVal) : new InstanceConfigElementResolved(perInstanceEditStrategy, retVal);
+ retVal, defaultStrategy) : new InstanceConfigElementResolved(perInstanceEditStrategy, retVal, defaultStrategy);
resolveConfiguration(instanceConfigElementResolved, services);
return instanceConfigElementResolved;
private final EditStrategyType editStrategy;
private final Map<String, AttributeConfigElement> configuration;
- public InstanceConfigElementResolved(String strat, Map<String, AttributeConfigElement> configuration) {
- EditStrategyType valueOf = checkStrategy(strat);
+ public InstanceConfigElementResolved(String currentStrategy, Map<String, AttributeConfigElement> configuration, EditStrategyType defaultStrategy) {
+ EditStrategyType valueOf = checkStrategy(currentStrategy, defaultStrategy);
this.editStrategy = valueOf;
this.configuration = configuration;
}
- EditStrategyType checkStrategy(String strat) {
- EditStrategyType valueOf = EditStrategyType.valueOf(strat);
- if (EditStrategyType.defaultStrategy().isEnforcing()) {
+ public InstanceConfigElementResolved(Map<String, AttributeConfigElement> configuration, EditStrategyType defaultStrategy) {
+ editStrategy = defaultStrategy;
+ this.configuration = configuration;
+ }
+
+
+ EditStrategyType checkStrategy(String currentStrategy, EditStrategyType defaultStrategy) {
+ EditStrategyType valueOf = EditStrategyType.valueOf(currentStrategy);
+ if (defaultStrategy.isEnforcing()) {
Preconditions
.checkArgument(
- valueOf == EditStrategyType.defaultStrategy(),
+ valueOf == defaultStrategy,
"With "
- + EditStrategyType.defaultStrategy()
+ + defaultStrategy
+ " as "
+ EditConfigXmlParser.DEFAULT_OPERATION_KEY
+ " operations on module elements are not permitted since the default option is restrictive");
return valueOf;
}
- public InstanceConfigElementResolved(Map<String, AttributeConfigElement> configuration) {
- editStrategy = EditStrategyType.defaultStrategy();
- this.configuration = configuration;
- }
public EditConfigStrategy getEditStrategy() {
return editStrategy.getFittingStrategy();
package org.opendaylight.controller.netconf.confignetconfconnector.mapping.config;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditStrategyType;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yangtools.yang.common.QName;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import javax.management.ObjectName;
import java.util.Collection;
-import java.util.Collections;
public class ModuleConfig {
private final String moduleName;
private final InstanceConfig instanceConfig;
- private final Collection<String> providedServices;
+ private final Multimap<String, String> providedServices;
- public ModuleConfig(String moduleName, InstanceConfig mbeanMapping, Collection<String> providedServices) {
+ public ModuleConfig(String moduleName, InstanceConfig mbeanMapping, Collection<QName> providedServices) {
this.moduleName = moduleName;
this.instanceConfig = mbeanMapping;
- this.providedServices = providedServices;
+ this.providedServices = mapServices(providedServices);
}
- public ModuleConfig(String key, InstanceConfig instanceConfig) {
- this(key, instanceConfig, Collections.<String> emptyList());
+ private Multimap<String, String> mapServices(Collection<QName> providedServices) {
+ Multimap<String, String> mapped = HashMultimap.create();
+
+ for (QName providedService : providedServices) {
+ String key = providedService.getNamespace().toString();
+ mapped.put(key, providedService.getLocalName());
+ }
+
+ return mapped;
}
public InstanceConfig getMbeanMapping() {
return instanceConfig;
}
- public Collection<String> getProvidedServices() {
+ public Multimap<String, String> getProvidedServices() {
return providedServices;
}
private String getPrefix(String namespace) {
// if(namespace.contains(":")==false)
- return "prefix";
+ return XmlNetconfConstants.PREFIX;
// return namespace.substring(namespace.lastIndexOf(':') + 1,
// namespace.length());
}
public ModuleElementResolved fromXml(XmlElement moduleElement, Services depTracker, String instanceName,
- String moduleNamespace) {
+ String moduleNamespace, EditStrategyType defaultStrategy) {
- InstanceConfigElementResolved ice = instanceConfig.fromXml(moduleElement, depTracker, moduleNamespace);
+ InstanceConfigElementResolved ice = instanceConfig.fromXml(moduleElement, depTracker, moduleNamespace, defaultStrategy);
return new ModuleElementResolved(instanceName, ice);
}
package org.opendaylight.controller.netconf.confignetconfconnector.mapping.config;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
+import org.opendaylight.controller.netconf.confignetconfconnector.mapping.attributes.fromxml.ObjectNameAttributeReadingStrategy;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import java.util.regex.Pattern;
public final class Services {
+ private static final Logger logger = LoggerFactory.getLogger(Services.class);
private static final String PROVIDER_KEY = "provider";
private static final String NAME_KEY = "name";
private long suffix = 1;
private final Map<ServiceInstance, String> instanceToRef = Maps.newHashMap();
- private final Map<String/* ServiceName */, Map<String/* refName */, ServiceInstance>> serviceNameToRefNameToInstance = Maps
+ private final Map<String /*Namespace*/, Map<String/* ServiceName */, Map<String/* refName */, ServiceInstance>>> namespaceToServiceNameToRefNameToInstance = Maps
.newHashMap();
- public String addServiceEntry(String serviceName, ObjectName on) {
+ public String addServiceEntry(String namespace, String serviceName, ObjectName on) {
String moduleName = on.getKeyProperty("moduleFactoryName");
String instanceName = on.getKeyProperty("instanceName");
- return addServiceEntry(serviceName, moduleName, instanceName);
+ String refName = addServiceEntry(namespace, serviceName, moduleName, instanceName);
+ logger.trace("Added service entry to tracker. Service name {}, ref name {}, module name {}, instance name {}",
+ serviceName, refName, moduleName, instanceName);
+ return refName;
}
- public String addServiceEntry(String serviceName, String moduleName, String instanceName) {
+ @VisibleForTesting
+ public String addServiceEntry(String namespace, String serviceName, String moduleName, String instanceName) {
ServiceInstance serviceInstance = new ServiceInstance(moduleName, instanceName);
serviceInstance.setServiceName(serviceName);
String refName = instanceToRef.get(serviceInstance);
+ Map<String, Map<String, ServiceInstance>> serviceNameToRefNameToInstance = namespaceToServiceNameToRefNameToInstance.get(namespace);
+ if (serviceNameToRefNameToInstance == null) {
+ serviceNameToRefNameToInstance = Maps.newHashMap();
+ namespaceToServiceNameToRefNameToInstance.put(namespace, serviceNameToRefNameToInstance);
+ }
+
Map<String, ServiceInstance> refNameToInstance = serviceNameToRefNameToInstance.get(serviceName);
if (refNameToInstance == null) {
refNameToInstance = Maps.newHashMap();
return refNamesAsSet;
}
- public ServiceInstance getByServiceAndRefName(String serviceName, String refName) {
+ public ServiceInstance getByServiceAndRefName(String namespace, String serviceName, String refName) {
+ Map<String, Map<String, ServiceInstance>> serviceNameToRefNameToInstance = namespaceToServiceNameToRefNameToInstance.get(namespace);
+ Preconditions.checkArgument(serviceNameToRefNameToInstance != null, "No serviceInstances mapped to " + namespace + " , "
+ + serviceNameToRefNameToInstance.keySet());
+
Map<String, ServiceInstance> refNameToInstance = serviceNameToRefNameToInstance.get(serviceName);
Preconditions.checkArgument(refNameToInstance != null, "No serviceInstances mapped to " + serviceName + " , "
+ serviceNameToRefNameToInstance.keySet());
// TODO hide getMappedServices, call it explicitly in toXml
- public Map<String, Map<String, String>> getMappedServices() {
- Map<String, Map<String, String>> retVal = Maps.newHashMap();
+ public Map<String, Map<String, Map<String, String>>> getMappedServices() {
+ Map<String, Map<String, Map<String, String>>> retVal = Maps.newHashMap();
+
+ for (String namespace : namespaceToServiceNameToRefNameToInstance.keySet()) {
- for (String serviceName : serviceNameToRefNameToInstance.keySet()) {
+ Map<String, Map<String, ServiceInstance>> serviceNameToRefNameToInstance = namespaceToServiceNameToRefNameToInstance
+ .get(namespace);
+ Map<String, Map<String, String>> innerRetVal = Maps.newHashMap();
- Map<String, String> innerRetVal = Maps.transformValues(serviceNameToRefNameToInstance.get(serviceName),
- new Function<ServiceInstance, String>() {
- @Nullable
- @Override
- public String apply(@Nullable ServiceInstance serviceInstance) {
- return serviceInstance.toString();
- }
- });
- retVal.put(serviceName, innerRetVal);
+ for (String serviceName : serviceNameToRefNameToInstance.keySet()) {
+
+ Map<String, String> innerInnerRetVal = Maps.transformValues(
+ serviceNameToRefNameToInstance.get(serviceName), new Function<ServiceInstance, String>() {
+ @Nullable
+ @Override
+ public String apply(@Nullable ServiceInstance serviceInstance) {
+ return serviceInstance.toString();
+ }
+ });
+ innerRetVal.put(serviceName, innerInnerRetVal);
+ }
+ retVal.put(namespace, innerRetVal);
}
return retVal;
// TODO hide resolveServices, call it explicitly in fromXml
- public static Services resolveServices(Map<String, Map<String, String>> mappedServices) {
+ public static Services resolveServices(Map<String, Map<String, Map<String, String>>> mappedServices) {
Services tracker = new Services();
- for (Entry<String, Map<String, String>> serviceEntry : mappedServices.entrySet()) {
+ for (Entry<String, Map<String, Map<String, String>>> namespaceEntry : mappedServices.entrySet()) {
+ String namespace = namespaceEntry.getKey();
- String serviceName = serviceEntry.getKey();
- for (Entry<String, String> refEntry : serviceEntry.getValue().entrySet()) {
+ for (Entry<String, Map<String, String>> serviceEntry : namespaceEntry.getValue().entrySet()) {
- Map<String, ServiceInstance> refNameToInstance = tracker.serviceNameToRefNameToInstance
- .get(serviceName);
- if (refNameToInstance == null) {
- refNameToInstance = Maps.newHashMap();
- tracker.serviceNameToRefNameToInstance.put(serviceName, refNameToInstance);
- }
+ String serviceName = serviceEntry.getKey();
+ for (Entry<String, String> refEntry : serviceEntry.getValue().entrySet()) {
- String refName = refEntry.getKey();
- Preconditions.checkState(false == refNameToInstance.containsKey(refName),
- "Duplicate reference name to service " + refName + " under service " + serviceName);
- ServiceInstance serviceInstance = ServiceInstance.fromString(refEntry.getValue());
- refNameToInstance.put(refName, serviceInstance);
+ Map<String, Map<String, ServiceInstance>> namespaceToServices = tracker.namespaceToServiceNameToRefNameToInstance.get(namespace);
+ if (namespaceToServices == null) {
+ namespaceToServices = Maps.newHashMap();
+ tracker.namespaceToServiceNameToRefNameToInstance.put(namespace, namespaceToServices);
+ }
+
+ Map<String, ServiceInstance> refNameToInstance = namespaceToServices
+ .get(serviceName);
+ if (refNameToInstance == null) {
+ refNameToInstance = Maps.newHashMap();
+ namespaceToServices.put(serviceName, refNameToInstance);
+ }
- tracker.instanceToRef.put(serviceInstance, refEntry.getKey());
+ String refName = refEntry.getKey();
+ Preconditions.checkState(false == refNameToInstance.containsKey(refName),
+ "Duplicate reference name to service " + refName + " under service " + serviceName);
+ ServiceInstance serviceInstance = ServiceInstance.fromString(refEntry.getValue());
+ refNameToInstance.put(refName, serviceInstance);
+
+ tracker.instanceToRef.put(serviceInstance, refEntry.getKey());
+ }
}
}
return tracker;
}
- public static Map<String, Map<String, String>> fromXml(XmlElement xml) {
- Map<String, Map<String, String>> retVal = Maps.newHashMap();
+ public static Map<String, Map<String, Map<String, String>>> fromXml(XmlElement xml) {
+ Map<String, Map<String, Map<String, String>>> retVal = Maps.newHashMap();
List<XmlElement> services = xml.getChildElements(SERVICE_KEY);
xml.checkUnrecognisedElements(services);
for (XmlElement service : services) {
XmlElement typeElement = service.getOnlyChildElement(TYPE_KEY);
- String serviceName = typeElement.getTextContent();
+ Entry<String, String> prefixNamespace = typeElement.findNamespaceOfTextContent();
+
+ Preconditions.checkState(prefixNamespace.getKey()!=null && prefixNamespace.getKey().equals("") == false, "Type attribute was not prefixed");
+
+ Map<String, Map<String, String>> namespaceToServices = retVal.get(prefixNamespace.getValue());
+ if(namespaceToServices == null) {
+ namespaceToServices = Maps.newHashMap();
+ retVal.put(prefixNamespace.getValue(), namespaceToServices);
+ }
+
+ String serviceName = ObjectNameAttributeReadingStrategy.checkPrefixAndExtractServiceName(typeElement, prefixNamespace);
Map<String, String> innerMap = Maps.newHashMap();
- retVal.put(serviceName, innerMap);
+ namespaceToServices.put(serviceName, innerMap);
List<XmlElement> instances = service.getChildElements(XmlNetconfConstants.INSTANCE_KEY);
service.checkUnrecognisedElements(instances, typeElement);
}
}
- public Element toXml(Map<String, Map<String, String>> mappedServices, Document document) {
+ public Element toXml(Map<String, Map<String, Map<String, String>>> mappedServices, Document document) {
Element root = document.createElement(XmlNetconfConstants.SERVICES_KEY);
XmlUtil.addNamespaceAttr(root, XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG);
- for (Entry<String, Map<String, String>> serviceEntry : mappedServices.entrySet()) {
- Element serviceElement = document.createElement(SERVICE_KEY);
- root.appendChild(serviceElement);
+ for (String namespace : mappedServices.keySet()) {
+
+ for (Entry<String, Map<String, String>> serviceEntry : mappedServices.get(namespace).entrySet()) {
+ Element serviceElement = document.createElement(SERVICE_KEY);
+ root.appendChild(serviceElement);
- Element typeElement = XmlUtil.createTextElement(document, TYPE_KEY, serviceEntry.getKey());
- serviceElement.appendChild(typeElement);
+ Element typeElement = XmlUtil.createPrefixedTextElement(document, TYPE_KEY, XmlNetconfConstants.PREFIX,
+ serviceEntry.getKey());
+ XmlUtil.addPrefixedNamespaceAttr(typeElement, XmlNetconfConstants.PREFIX, namespace);
+ serviceElement.appendChild(typeElement);
- for (Entry<String, String> instanceEntry : serviceEntry.getValue().entrySet()) {
- Element instanceElement = document.createElement(XmlNetconfConstants.INSTANCE_KEY);
- serviceElement.appendChild(instanceElement);
+ for (Entry<String, String> instanceEntry : serviceEntry.getValue().entrySet()) {
+ Element instanceElement = document.createElement(XmlNetconfConstants.INSTANCE_KEY);
+ serviceElement.appendChild(instanceElement);
- Element nameElement = XmlUtil.createTextElement(document, NAME_KEY, instanceEntry.getKey());
- instanceElement.appendChild(nameElement);
+ Element nameElement = XmlUtil.createTextElement(document, NAME_KEY, instanceEntry.getKey());
+ instanceElement.appendChild(nameElement);
- Element providerElement = XmlUtil.createTextElement(document, PROVIDER_KEY, instanceEntry.getValue());
- instanceElement.appendChild(providerElement);
+ Element providerElement = XmlUtil.createTextElement(document, PROVIDER_KEY, instanceEntry.getValue());
+ instanceElement.appendChild(providerElement);
+ }
}
- }
+ }
return root;
}
public ModuleRpcs getRpcMapping(RuntimeRpcElementResolved id) {
Map<String, ModuleRpcs> modules = mappedRpcs.get(id.getNamespace());
Preconditions.checkState(modules != null, "No modules found for namespace %s", id.getNamespace());
- ModuleRpcs rpcMapping = modules.get(id.getModuleName());
- Preconditions.checkState(modules != null, "No module %s found for namespace %s", id.getModuleName(),
+ String moduleName = id.getModuleName();
+ ModuleRpcs rpcMapping = modules.get(moduleName);
+ Preconditions.checkState(rpcMapping != null, "No module %s found for namespace %s", moduleName,
id.getNamespace());
return rpcMapping;
import com.google.common.collect.Collections2;
import com.google.common.collect.Sets;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.InstanceConfig;
-import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
}));
}
- public Element toXml(ObjectName rootOn, Set<ObjectName> childRbeOns, Document document) {
- return toXml(rootOn, childRbeOns, document, null, null);
+ public Element toXml(ObjectName rootOn, Set<ObjectName> childRbeOns, Document document, Element parentElement, String namespace) {
+ return toXml(rootOn, childRbeOns, document, null, parentElement, namespace);
}
public Element toXml(ObjectName rootOn, Set<ObjectName> childRbeOns, Document document, String instanceIndex,
- String keyName) {
- Element xml = document.createElement(keyName == null ? XmlNetconfConstants.DATA_KEY : keyName);
+ Element parentElement, String namespace) {
// TODO namespace
- xml = instanceMapping.toXml(rootOn, null, "namespace", document, xml);
+ Element xml = instanceMapping.toXml(rootOn, null, namespace, document, parentElement);
if (instanceIndex != null) {
xml.setAttribute(KEY_ATTRIBUTE_KEY, instanceIndex);
String runtimeInstanceIndex = objectName.getKeyProperty(childMappingEntry.getKey());
String elementName = jmxToYangChildRbeMapping.get(childMappingEntry.getKey());
- xml.appendChild(childMappingEntry.getValue().toXml(objectName, innerChildRbeOns, document,
- runtimeInstanceIndex, elementName));
+
+ Element innerXml = document.createElement(elementName);
+ childMappingEntry.getValue().toXml(objectName, innerChildRbeOns, document,
+ runtimeInstanceIndex, innerXml, namespace);
+ xml.appendChild(innerXml);
}
}
package org.opendaylight.controller.netconf.confignetconfconnector.mapping.runtime;
-import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
throw new IllegalStateException("Root runtime bean not found among " + runtimeBeanOns);
}
- public Element toXml(String namespace, Multimap<String, ObjectName> instances, Document document) {
- Element root = document.createElement(XmlNetconfConstants.MODULE_KEY);
- XmlUtil.addNamespaceAttr(root, namespace);
+ public Element toXml(String namespace, String instanceName, Collection<ObjectName> runtimeBeanOns, Document document) {
+ Element moduleElement = document.createElement(XmlNetconfConstants.MODULE_KEY);
- Element nameElement = XmlUtil.createTextElement(document, XmlNetconfConstants.NAME_KEY, moduleName);
- root.appendChild(nameElement);
+ final String prefix = getPrefix(namespace);
+ Element typeElement = XmlUtil.createPrefixedTextElement(document, XmlNetconfConstants.TYPE_KEY, prefix,
+ moduleName);
+ XmlUtil.addPrefixedNamespaceAttr(typeElement, prefix, namespace);
+ moduleElement.appendChild(typeElement);
- for (String instanceName : instances.keySet()) {
- Element instance = document.createElement(XmlNetconfConstants.INSTANCE_KEY);
+ Element nameElement = XmlUtil.createTextElement(document, XmlNetconfConstants.NAME_KEY, instanceName);
+ moduleElement.appendChild(nameElement);
- Element innerNameElement = XmlUtil.createTextElement(document, XmlNetconfConstants.NAME_KEY, instanceName);
- instance.appendChild(innerNameElement);
+ ObjectName rootName = findRoot(runtimeBeanOns);
- Collection<ObjectName> runtimeBeanOns = instances.get(instanceName);
- ObjectName rootName = findRoot(runtimeBeanOns);
+ Set<ObjectName> childrenRuntimeBeans = Sets.newHashSet(runtimeBeanOns);
+ childrenRuntimeBeans.remove(rootName);
- Set<ObjectName> childrenRuntimeBeans = Sets.newHashSet(runtimeBeanOns);
- childrenRuntimeBeans.remove(rootName);
+ instanceRuntime.toXml(rootName, childrenRuntimeBeans, document, moduleElement, namespace);
- instance.appendChild(instanceRuntime.toXml(rootName, childrenRuntimeBeans, document));
-
- root.appendChild(instance);
- }
+ return moduleElement;
+ }
- return root;
+ private String getPrefix(String namespace) {
+ return XmlNetconfConstants.PREFIX;
}
}
if (instanceToRbe == null)
continue;
- ModuleRuntime moduleRuntime = moduleRuntimes.get(localNamespace).get(moduleName);
- Element innerXml = moduleRuntime.toXml(localNamespace, instanceToRbe, document);
- modulesElement.appendChild(innerXml);
+ for (String instanceName : instanceToRbe.keySet()) {
+ ModuleRuntime moduleRuntime = moduleRuntimes.get(localNamespace).get(moduleName);
+ Element innerXml = moduleRuntime.toXml(localNamespace, instanceName, instanceToRbe.get(instanceName), document);
+ modulesElement.appendChild(innerXml);
+ }
+
}
}
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.ModuleConfig;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.ModuleElementResolved;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfigXmlParser.EditConfigExecution;
import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
}
private void executeTests(ConfigRegistryClient configRegistryClient,
- EditConfigXmlParser.EditConfigExecution editConfigExecution) throws NetconfDocumentedException {
+ EditConfigExecution editConfigExecution) throws NetconfDocumentedException {
try {
- test(configRegistryClient, editConfigExecution.resolvedXmlElements);
+ test(configRegistryClient, editConfigExecution.getResolvedXmlElements(), editConfigExecution.getDefaultStrategy());
} catch (IllegalStateException | JmxAttributeValidationException | ValidationException e) {
logger.warn("Test phase for {} failed", EditConfigXmlParser.EDIT_CONFIG, e);
final Map<String, String> errorInfo = new HashMap<>();
}
private void test(ConfigRegistryClient configRegistryClient,
- Map<String, Multimap<String, ModuleElementResolved>> resolvedModules) {
+ Map<String, Multimap<String, ModuleElementResolved>> resolvedModules, EditStrategyType editStrategyType) {
ObjectName taON = transactionProvider.getTestTransaction();
try {
// default strategy = replace wipes config
- if (EditStrategyType.defaultStrategy() == EditStrategyType.replace) {
+ if (editStrategyType == EditStrategyType.replace) {
transactionProvider.wipeTestTransaction(taON);
}
setOnTransaction(configRegistryClient, resolvedModules, taON);
ObjectName taON = transactionProvider.getOrCreateTransaction();
// default strategy = replace wipes config
- if (EditStrategyType.defaultStrategy() == EditStrategyType.replace) {
+ if (editConfigExecution.getDefaultStrategy() == EditStrategyType.replace) {
transactionProvider.wipeTransaction();
}
- setOnTransaction(configRegistryClient, editConfigExecution.resolvedXmlElements, taON);
+ setOnTransaction(configRegistryClient, editConfigExecution.getResolvedXmlElements(), taON);
}
private void setOnTransaction(ConfigRegistryClient configRegistryClient,
}
public static Config getConfigMapping(ConfigRegistryClient configRegistryClient,
- Map<String, Map<String, ModuleMXBeanEntry>> mBeanEntries) {
+ Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> mBeanEntries) {
Map<String, Map<String, ModuleConfig>> factories = transform(configRegistryClient, mBeanEntries);
+
return new Config(factories);
}
// TODO refactor
- private static Map<String, Map<String, ModuleConfig>> transform(final ConfigRegistryClient configRegistryClient,
- Map<String, Map<String, ModuleMXBeanEntry>> mBeanEntries) {
+ private static Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleConfig>> transform
+ (final ConfigRegistryClient configRegistryClient, Map<String/* Namespace from yang file */,
+ Map<String /* Name of module entry from yang file */, ModuleMXBeanEntry>> mBeanEntries) {
return Maps.transformEntries(mBeanEntries,
new Maps.EntryTransformer<String, Map<String, ModuleMXBeanEntry>, Map<String, ModuleConfig>>() {
new Maps.EntryTransformer<String, ModuleMXBeanEntry, ModuleConfig>() {
@Override
- public ModuleConfig transformEntry(String key, ModuleMXBeanEntry value) {
- return new ModuleConfig(key, new InstanceConfig(configRegistryClient, value
- .getAttributes()));
+ public ModuleConfig transformEntry(String key, ModuleMXBeanEntry moduleMXBeanEntry) {
+ return new ModuleConfig(key, new InstanceConfig(configRegistryClient, moduleMXBeanEntry
+ .getAttributes()), moduleMXBeanEntry.getProvidedServices().values());
}
});
}
EditConfigXmlParser.EditConfigExecution editConfigExecution;
Config cfg = getConfigMapping(configRegistryClient, yangStoreSnapshot.getModuleMXBeanEntryMap());
try {
- editConfigExecution = editConfigXmlParser.fromXml(xml, cfg);
+ editConfigExecution = editConfigXmlParser.fromXml(xml, cfg, transactionProvider, configRegistryClient);
} catch (IllegalStateException e) {
logger.warn("Error parsing xml", e);
final Map<String, String> errorInfo = new HashMap<>();
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Multimap;
+import org.opendaylight.controller.config.util.ConfigRegistryClient;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.Config;
import org.opendaylight.controller.netconf.confignetconfconnector.mapping.config.ModuleElementResolved;
import org.opendaylight.controller.netconf.confignetconfconnector.operations.Datastore;
+import org.opendaylight.controller.netconf.confignetconfconnector.transactions.TransactionProvider;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.opendaylight.controller.netconf.util.xml.XmlNetconfConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.management.ObjectName;
import java.util.Arrays;
+import java.util.Collections;
import java.util.Map;
+import java.util.Set;
public class EditConfigXmlParser {
public EditConfigXmlParser() {
}
- EditConfigXmlParser.EditConfigExecution fromXml(final XmlElement xml, final Config cfgMapping)
+ EditConfigXmlParser.EditConfigExecution fromXml(final XmlElement xml, final Config cfgMapping,
+ TransactionProvider transactionProvider, ConfigRegistryClient configRegistryClient)
throws NetconfDocumentedException {
- EditStrategyType.resetDefaultStrategy();
+ EditStrategyType editStrategyType = EditStrategyType.getDefaultStrategy();
xml.checkName(EditConfigXmlParser.EDIT_CONFIG);
xml.checkNamespace(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
// Default op
Optional<XmlElement> defaultContent = xml
.getOnlyChildElementWithSameNamespaceOptionally(EditConfigXmlParser.DEFAULT_OPERATION_KEY);
- if (defaultContent.isPresent())
- EditStrategyType.setDefaultStrategy(EditStrategyType.valueOf(defaultContent.get().getTextContent()));
+ if (defaultContent.isPresent()) {
+ String mergeStrategyString = defaultContent.get().getTextContent();
+ logger.trace("Setting merge strategy to {}", mergeStrategyString);
+ editStrategyType = EditStrategyType.valueOf(mergeStrategyString);
+ }
+ Set<ObjectName> instancesForFillingServiceRefMapping = Collections.emptySet();
+ if (editStrategyType == EditStrategyType.merge) {
+ instancesForFillingServiceRefMapping = Datastore.getInstanceQueryStrategy(targetDatastore, transactionProvider)
+ .queryInstances(configRegistryClient);
+ logger.trace("Pre-filling services from following instances: {}", instancesForFillingServiceRefMapping);
+ }
XmlElement configElement = xml.getOnlyChildElementWithSameNamespace(XmlNetconfConstants.CONFIG_KEY);
- return new EditConfigXmlParser.EditConfigExecution(xml, cfgMapping, configElement, testOption);
+ return new EditConfigXmlParser.EditConfigExecution(xml, cfgMapping, configElement, testOption,
+ instancesForFillingServiceRefMapping, editStrategyType);
}
private void removeMountpointsFromConfig(XmlElement configElement, XmlElement mountpointsElement) {
@VisibleForTesting
static class EditConfigExecution {
- XmlElement editConfigXml;
- Map<String, Multimap<String, ModuleElementResolved>> resolvedXmlElements;
- TestOption testOption;
+ private final XmlElement editConfigXml;
+ private final Map<String, Multimap<String, ModuleElementResolved>> resolvedXmlElements;
+ private final TestOption testOption;
+ private final EditStrategyType defaultEditStrategyType;
- EditConfigExecution(XmlElement xml, Config configResolver, XmlElement configElement, TestOption testOption) {
+ EditConfigExecution(XmlElement xml, Config configResolver, XmlElement configElement, TestOption testOption, Set<ObjectName> instancesForFillingServiceRefMapping,
+ EditStrategyType defaultStrategy) {
this.editConfigXml = xml;
- this.resolvedXmlElements = configResolver.fromXml(configElement);
+ this.resolvedXmlElements = configResolver.fromXml(configElement, instancesForFillingServiceRefMapping, defaultStrategy);
this.testOption = testOption;
+ this.defaultEditStrategyType = defaultStrategy;
}
boolean shouldTest() {
boolean shouldSet() {
return testOption == TestOption.set || testOption == TestOption.testThenSet;
}
+
+ Map<String, Multimap<String, ModuleElementResolved>> getResolvedXmlElements() {
+ return resolvedXmlElements;
+ }
+
+ EditStrategyType getDefaultStrategy() {
+ return defaultEditStrategyType;
+ }
}
}
import java.util.EnumSet;
import java.util.Set;
-import com.google.common.base.Preconditions;
-
+//FIXME: make thread safe
public enum EditStrategyType {
// can be default
merge, replace, none,
private static final Set<EditStrategyType> defaultStrats = EnumSet.of(merge, replace, none);
- private static EditStrategyType defaultStrat = merge;
-
- public static EditStrategyType defaultStrategy() {
- return defaultStrat;
- }
-
- public static void setDefaultStrategy(EditStrategyType strat) {
- Preconditions.checkArgument(strat.canBeDefault(), "Default edit strategy can be only of value " + defaultStrats
- + ", but was " + strat);
- defaultStrat = strat;
- }
-
- public static void resetDefaultStrategy() {
- setDefaultStrategy(EditStrategyType.merge);
+ public static EditStrategyType getDefaultStrategy() {
+ return merge;
}
public boolean isEnforcing() {
}
}
- private static final EnumSet<EditStrategyType> defaults;
-
- static {
- defaults = EnumSet.of(merge, replace, none);
- }
-
- private boolean canBeDefault() {
- return defaults.contains(this);
- }
-
public EditConfigStrategy getFittingStrategy() {
switch (this) {
case merge:
Element dataElement = document.createElement(XmlNetconfConstants.DATA_KEY);
final Set<ObjectName> instances = Datastore.getInstanceQueryStrategy(source, this.transactionProvider)
.queryInstances(configRegistryClient);
+
final Config configMapping = new Config(transform(configRegistryClient,
yangStoreSnapshot.getModuleMXBeanEntryMap()));
dataElement = configMapping.toXml(instances, this.maybeNamespace, document, dataElement);
if (contextInstanceElement.isPresent() == false)
return HandlingPriority.CANNOT_HANDLE;
+ // FIXME update xpath to instance to conform to config-api yang
final RuntimeRpcElementResolved id = RuntimeRpcElementResolved.fromXpath(contextInstanceElement.get()
.getTextContent(), netconfOperationName, netconfOperationNamespace);
private RuntimeRpcElementResolved(String namespace, String moduleName, String instanceName, String runtimeBeanName,
Map<String, String> additionalAttributes) {
- this.moduleName = moduleName;
- this.instanceName = instanceName;
+ this.moduleName = Preconditions.checkNotNull(moduleName, "Module name");
+ this.instanceName = Preconditions.checkNotNull(instanceName, "Instance name");
this.additionalAttributes = additionalAttributes;
- this.namespace = namespace;
- this.runtimeBeanName = runtimeBeanName;
+ this.namespace = Preconditions.checkNotNull(namespace, "Namespace");
+ this.runtimeBeanName = Preconditions.checkNotNull(runtimeBeanName, "Runtime bean name");
}
public String getModuleName() {
return ObjectNameUtil.createRuntimeBeanName(moduleName, instanceName, additionalAttributesJavaNames);
}
- private static final String xpathPatternBlueprint = "/" + XmlNetconfConstants.DATA_KEY + "/"
- + XmlNetconfConstants.MODULES_KEY + "/" + XmlNetconfConstants.MODULE_KEY + "\\["
- + XmlNetconfConstants.NAME_KEY + "='(.+)'\\]/" + XmlNetconfConstants.INSTANCE_KEY + "\\["
- + XmlNetconfConstants.NAME_KEY + "='([^']+)'\\](.*)";
+ private static final String xpathPatternBlueprint =
+ "/" + XmlNetconfConstants.MODULES_KEY
+ + "/" + XmlNetconfConstants.MODULE_KEY
+ + "\\["
+
+ + "(?<key1>type|name)"
+ + "='(?<value1>[^']+)'"
+ + "( and |\\]\\[)"
+ + "(?<key2>type|name)"
+ + "='(?<value2>[^']+)'"
+
+ + "\\]"
+ + "(?<additional>.*)";
+
private static final Pattern xpathPattern = Pattern.compile(xpathPatternBlueprint);
- private static final String additionalPatternBlueprint = "(.+)\\[(.+)='(.+)'\\]";
+ private static final String additionalPatternBlueprint = "(?<additionalKey>.+)\\[(.+)='(?<additionalValue>.+)'\\]";
private static final Pattern additionalPattern = Pattern.compile(additionalPatternBlueprint);
public static RuntimeRpcElementResolved fromXpath(String xpath, String elementName, String namespace) {
"Node %s with value '%s' not in required form on rpc element %s, required format is %s",
RuntimeRpc.CONTEXT_INSTANCE, xpath, elementName, xpathPatternBlueprint);
- String moduleName = matcher.group(1);
- String instanceName = matcher.group(2);
- String additionalString = matcher.group(3);
- HashMap<String, String> additionalAttributes = Maps.<String, String> newHashMap();
- String runtimeBeanYangName = moduleName;
- for (String additionalKeyValue : additionalString.split("/")) {
- if (Strings.isNullOrEmpty(additionalKeyValue))
- continue;
- matcher = additionalPattern.matcher(additionalKeyValue);
- Preconditions
- .checkState(
- matcher.matches(),
- "Attribute %s not in required form on rpc element %s, required format for additional attributes is %s",
- additionalKeyValue, elementName, additionalPatternBlueprint);
- String name = matcher.group(1);
- runtimeBeanYangName = name;
- additionalAttributes.put(name, matcher.group(3));
- }
+ PatternGroupResolver groups = new PatternGroupResolver(matcher.group("key1"), matcher.group("value1"),
+ matcher.group("key2"), matcher.group("value2"), matcher.group("additional"));
+
+ String moduleName = groups.getModuleName();
+ String instanceName = groups.getInstanceName();
+
+ HashMap<String, String> additionalAttributes = groups.getAdditionalKeys(elementName, moduleName);
- return new RuntimeRpcElementResolved(namespace, moduleName, instanceName, runtimeBeanYangName,
+ return new RuntimeRpcElementResolved(namespace, moduleName, instanceName, groups.getRuntimeBeanYangName(),
additionalAttributes);
}
+
+ private static final class PatternGroupResolver {
+
+ private final String key1, key2, value1, value2;
+ private final String additional;
+ private String runtimeBeanYangName;
+
+ PatternGroupResolver(String key1, String value1, String key2, String value2, String additional) {
+ this.key1 = Preconditions.checkNotNull(key1);
+ this.value1 = Preconditions.checkNotNull(value1);
+
+ this.key2 = Preconditions.checkNotNull(key2);
+ this.value2 = Preconditions.checkNotNull(value2);
+
+ this.additional = Preconditions.checkNotNull(additional);
+ }
+
+ String getModuleName() {
+ return key1.equals(XmlNetconfConstants.TYPE_KEY) ? value1 : value2;
+ }
+
+ String getInstanceName() {
+ return key1.equals(XmlNetconfConstants.NAME_KEY) ? value1 : value2;
+ }
+
+ HashMap<String, String> getAdditionalKeys(String elementName, String moduleName) {
+ HashMap<String, String> additionalAttributes = Maps.newHashMap();
+
+ runtimeBeanYangName = moduleName;
+ for (String additionalKeyValue : additional.split("/")) {
+ if (Strings.isNullOrEmpty(additionalKeyValue))
+ continue;
+ Matcher matcher = additionalPattern.matcher(additionalKeyValue);
+ Preconditions
+ .checkState(
+ matcher.matches(),
+ "Attribute %s not in required form on rpc element %s, required format for additional attributes is %s",
+ additionalKeyValue, elementName, additionalPatternBlueprint);
+ String name = matcher.group("additionalKey");
+ runtimeBeanYangName = name;
+ additionalAttributes.put(name, matcher.group("additionalValue"));
+ }
+ return additionalAttributes;
+ }
+
+ private String getRuntimeBeanYangName() {
+ Preconditions.checkState(runtimeBeanYangName!=null);
+ return runtimeBeanYangName;
+ }
+ }
}
@Override
public synchronized void close() {
for (ObjectName tx : allOpenedTransactions) {
- if (isStillOpenTransaction(tx)) {
- try {
+ try {
+ if (isStillOpenTransaction(tx)) {
configRegistryClient.getConfigTransactionClient(tx).abortConfig();
- } catch (Exception e) {
- logger.debug("Ignoring {} while closing transaction {}", e.toString(), tx, e);
}
+ } catch (Exception e) {
+ logger.debug("Ignoring exception while closing transaction {}", tx, e);
}
}
allOpenedTransactions.clear();
edit("netconfMessages/editConfig.xml");
checkBinaryLeafEdited(getConfigCandidate());
+
// default-operation:none, should not affect binary leaf
edit("netconfMessages/editConfig_none.xml");
checkBinaryLeafEdited(getConfigCandidate());
// check after edit
commit();
Element response = getConfigRunning();
- // System.out.println(Xml.toString(response));
checkBinaryLeafEdited(response);
checkTypeConfigAttribute(response);
Element response = get();
- assertEquals(2, getElementsSize(response, "instance"));
+ System.err.println(XmlUtil.toString(response));
+
+ assertEquals(2, getElementsSize(response, "module"));
assertEquals(2, getElementsSize(response, "asdf"));
assertEquals(5, getElementsSize(response, "inner-running-data"));
assertEquals(5, getElementsSize(response, "deep2"));
/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
+* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+*
+* This program and the accompanying materials are made available under the
+* terms of the Eclipse Public License v1.0 which accompanies this distribution,
+* and is available at http://www.eclipse.org/legal/epl-v10.html
+*/
package org.opendaylight.controller.netconf.confignetconfconnector;
@Test
public void testOneInstanceMultipleServices() {
Services services = new Services();
- services.addServiceEntry("s1", "module", "instance");
+ services.addServiceEntry("nm", "s1", "module", "instance");
assertEquals(1, services.getMappedServices().size());
- services.addServiceEntry("s2", "module", "instance");
+ services.addServiceEntry("nm2", "s2", "module", "instance");
assertEquals(2, services.getMappedServices().size());
}
@Test
public void testMultipleInstancesOneName() throws Exception {
Services services = new Services();
- services.addServiceEntry("s1", "module", "instance");
+ services.addServiceEntry("nm", "s1", "module", "instance");
assertEquals(1, services.getMappedServices().size());
- services.addServiceEntry("s1", "module2", "instance");
+ services.addServiceEntry("nm", "s1", "module2", "instance");
assertEquals(1, services.getMappedServices().size());
- assertEquals(2, services.getMappedServices().get("s1").size());
- assertTrue(services.getMappedServices().get("s1").containsKey("ref_instance"));
- assertTrue(services.getMappedServices().get("s1").containsKey("ref_instance_1"));
+ assertEquals(2, services.getMappedServices().get("nm").get("s1").size());
+ assertTrue(services.getMappedServices().get("nm").get("s1").containsKey("ref_instance"));
+ assertTrue(services.getMappedServices().get("nm").get("s1").containsKey("ref_instance_1"));
}
@Test
public void testMultipleInstancesOneName2() throws Exception {
Services services = new Services();
- services.addServiceEntry("s1", "module", "instance_1");
-
- services.addServiceEntry("s2", "module2", "instance");
- services.addServiceEntry("s2", "module3", "instance");
- services.addServiceEntry("s1", "module3", "instance");
-
- assertEquals(2, services.getMappedServices().get("s1").size());
- assertEquals(2, services.getMappedServices().get("s2").size());
- assertTrue(services.getMappedServices().get("s1").containsKey("ref_instance_2"));
- assertTrue(services.getMappedServices().get("s1").containsKey("ref_instance_1"));
- assertTrue(services.getMappedServices().get("s2").containsKey("ref_instance"));
- assertTrue(services.getMappedServices().get("s2").containsKey("ref_instance_2"));
+ services.addServiceEntry("nm", "s1", "module", "instance_1");
+
+ services.addServiceEntry("nm2", "s2", "module2", "instance");
+ services.addServiceEntry("nm2", "s2", "module3", "instance");
+ services.addServiceEntry("nm", "s1", "module3", "instance");
+
+ assertEquals(2, services.getMappedServices().get("nm").get("s1").size());
+ assertEquals(2, services.getMappedServices().get("nm2").get("s2").size());
+ assertTrue(services.getMappedServices().get("nm").get("s1").containsKey("ref_instance_2"));
+ assertTrue(services.getMappedServices().get("nm").get("s1").containsKey("ref_instance_1"));
+ assertTrue(services.getMappedServices().get("nm2").get("s2").containsKey("ref_instance"));
+ assertTrue(services.getMappedServices().get("nm2").get("s2").containsKey("ref_instance_2"));
}
}
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import javax.management.ObjectName;
+import java.util.Collections;
import java.util.Map;
+import java.util.Set;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyMap;
Config cfg = mock(Config.class);
XmlElement xmlElement = mock(XmlElement.class);
- doReturn(resolvedXmlElements).when(cfg).fromXml(xmlElement);
+ Set<ObjectName> instancesForFillingServiceRefMapping = Collections.emptySet();
+ EditStrategyType defaultStrategy = EditStrategyType.getDefaultStrategy();
+ doReturn(resolvedXmlElements).when(cfg).fromXml(xmlElement, instancesForFillingServiceRefMapping, defaultStrategy);
EditConfigExecution editConfigExecution = new EditConfigExecution(null, cfg, xmlElement,
- EditConfigXmlParser.TestOption.testThenSet);
+ EditConfigXmlParser.TestOption.testThenSet, instancesForFillingServiceRefMapping, defaultStrategy);
edit.getResponseInternal(XmlUtil.newDocument(), editConfigExecution);
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Set;
/**
private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterNotificationHandler.class);
private final InetSocketAddress address;
- private final NetconfClientDispatcher dispatcher;
private final EventLoopGroup nettyThreadgroup;
+ private NetconfClientDispatcher netconfClientDispatcher;
private NetconfClient netconfClient;
private final Persister persister;
this.timeout = timeout;
this.nettyThreadgroup = new NioEventLoopGroup();
- this.dispatcher = new NetconfClientDispatcher(Optional.<SSLContext>absent(), nettyThreadgroup, nettyThreadgroup);
}
public void init() throws InterruptedException {
while (true) {
attempt++;
+ netconfClientDispatcher = new NetconfClientDispatcher(Optional.<SSLContext>absent(), nettyThreadgroup, nettyThreadgroup);
try {
- netconfClient = new NetconfClient(this.toString(), address, delay, dispatcher);
- // TODO is this correct ex to catch ?
+ netconfClient = new NetconfClient(this.toString(), address, delay, netconfClientDispatcher);
} catch (IllegalStateException e) {
logger.debug("Netconf {} was not initialized or is not stable, attempt {}", address, attempt, e);
+ netconfClientDispatcher.close();
Thread.sleep(delay);
continue;
}
logger.debug("Polling hello from netconf, attempt {}, capabilities {}", attempt, currentCapabilities);
- try {
- netconfClient.close();
- } catch (IOException e) {
- throw new RuntimeException("Error closing temporary client " + netconfClient);
- }
+ closeClientAndDispatcher(netconfClient, netconfClientDispatcher);
Thread.sleep(delay);
}
+ Set<String> allNotFound = new HashSet<>(expectedCaps);
+ allNotFound.removeAll(currentCapabilities);
+ logger.error("Netconf server did not provide required capabilities. Expected but not found: {}, all expected {}, current {}",
+ allNotFound, expectedCaps ,currentCapabilities);
+ throw new RuntimeException("Netconf server did not provide required capabilities. Expected but not found:" + allNotFound);
- throw new RuntimeException("Netconf server did not provide required capabilities " + expectedCaps
- + " in time, provided capabilities " + currentCapabilities);
+ }
+ private static void closeClientAndDispatcher(Closeable client, Closeable dispatcher) {
+ Exception fromClient = null;
+ try {
+ client.close();
+ } catch (Exception e) {
+ fromClient = e;
+ } finally {
+ try {
+ dispatcher.close();
+ } catch (Exception e) {
+ if (fromClient != null) {
+ e.addSuppressed(fromClient);
+ }
+
+ throw new RuntimeException("Error closing temporary client ", e);
+ }
+ }
}
private boolean isSubset(Set<String> currentCapabilities, Set<String> expectedCaps) {
}
}
+ if (netconfClientDispatcher != null) {
+ try {
+ netconfClientDispatcher.close();
+ } catch (Exception e) {
+ logger.warn("Unable to close connection to netconf {}", netconfClientDispatcher, e);
+ }
+ }
+
try {
nettyThreadgroup.shutdownGracefully();
} catch (Exception e) {
- logger.warn("Unable to close netconf client thread group {}", dispatcher, e);
+ logger.warn("Unable to close netconf client thread group {}", netconfClientDispatcher, e);
}
// unregister from JMX
import org.opendaylight.protocol.framework.ReconnectStrategy;
import org.opendaylight.protocol.framework.SessionListener;
import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
+import java.io.Closeable;
import java.net.InetSocketAddress;
-public class NetconfClientDispatcher extends AbstractDispatcher<NetconfClientSession, NetconfClientSessionListener> {
+public class NetconfClientDispatcher extends AbstractDispatcher<NetconfClientSession, NetconfClientSessionListener> implements Closeable {
+
+ private static final Logger logger = LoggerFactory.getLogger(NetconfClient.class);
private final Optional<SSLContext> maybeContext;
private final NetconfClientSessionNegotiatorFactory negotatorFactory;
+ private final HashedWheelTimer timer;
public NetconfClientDispatcher(final Optional<SSLContext> maybeContext, EventLoopGroup bossGroup, EventLoopGroup workerGroup) {
super(bossGroup, workerGroup);
this.maybeContext = Preconditions.checkNotNull(maybeContext);
- this.negotatorFactory = new NetconfClientSessionNegotiatorFactory(new HashedWheelTimer());
+ timer = new HashedWheelTimer();
+ this.negotatorFactory = new NetconfClientSessionNegotiatorFactory(timer);
}
public Future<NetconfClientSession> createClient(InetSocketAddress address,
}
}
+ @Override
+ public void close() {
+ try {
+ timer.stop();
+ } catch (Exception e) {
+ logger.debug("Ignoring exception while closing {}", timer, e);
+ }
+ }
}
private DefaultCommitNotificationProducer commitNot;
private NetconfServerDispatcher dispatch;
private NioEventLoopGroup eventLoopGroup;
+ private HashedWheelTimer timer;
@Override
public void start(final BundleContext context) throws Exception {
factoriesTracker.open();
SessionIdProvider idProvider = new SessionIdProvider();
+ timer = new HashedWheelTimer();
NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
- new HashedWheelTimer(), factoriesListener, idProvider);
+ timer, factoriesListener, idProvider);
commitNot = new DefaultCommitNotificationProducer(ManagementFactory.getPlatformMBeanServer());
commitNot.close();
eventLoopGroup.shutdownGracefully();
+ timer.stop();
}
}
*/
package org.opendaylight.controller.netconf.impl.osgi;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfOperationRouter;
import org.opendaylight.controller.netconf.api.NetconfSession;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
public class NetconfOperationRouterImpl implements NetconfOperationRouter {
@Override
public synchronized Document onNetconfMessage(Document message,
NetconfSession session) throws NetconfDocumentedException {
- NetconfOperationExecution netconfOperationExecution = getNetconfOperationWithHighestPriority(
- message, session);
- logger.debug("Forwarding netconf message {} to {}", XmlUtil.toString(message),
+ NetconfOperationExecution netconfOperationExecution = null;
+
+ String messageAsString = XmlUtil.toString(message);
+
+ try {
+ netconfOperationExecution = getNetconfOperationWithHighestPriority(message, session);
+ } catch (IllegalArgumentException | IllegalStateException e) {
+ logger.warn("Unable to handle rpc {} on session {}", messageAsString, session, e);
+
+ String errorMessage = String.format("Unable to handle rpc %s on session %s", messageAsString, session);
+ Map<String, String> errorInfo = Maps.newHashMap();
+
+ NetconfDocumentedException.ErrorTag tag = null;
+ if (e instanceof IllegalArgumentException) {
+ errorInfo.put(NetconfDocumentedException.ErrorTag.operation_not_supported.toString(), e.getMessage());
+ tag = NetconfDocumentedException.ErrorTag.operation_not_supported;
+ } else if (e instanceof IllegalStateException) {
+ errorInfo.put(NetconfDocumentedException.ErrorTag.operation_failed.toString(), e.getMessage());
+ tag = NetconfDocumentedException.ErrorTag.operation_failed;
+ }
+
+ throw new NetconfDocumentedException(errorMessage, e, NetconfDocumentedException.ErrorType.application,
+ tag, NetconfDocumentedException.ErrorSeverity.error, errorInfo);
+ } catch (RuntimeException e) {
+ throw handleUnexpectedEx("Unexpected exception during netconf operation sort", e);
+ }
+
+ try {
+ return executeOperationWithHighestPriority(message, netconfOperationExecution, messageAsString);
+ } catch (RuntimeException e) {
+ throw handleUnexpectedEx("Unexpected exception during netconf operation execution", e);
+ }
+ }
+
+ private NetconfDocumentedException handleUnexpectedEx(String s, Exception e) throws NetconfDocumentedException {
+ logger.error(s, e);
+
+ Map<String, String> info = Maps.newHashMap();
+ info.put(NetconfDocumentedException.ErrorSeverity.error.toString(), e.toString());
+ return new NetconfDocumentedException("Unexpected error",
+ NetconfDocumentedException.ErrorType.application,
+ NetconfDocumentedException.ErrorTag.operation_failed,
+ NetconfDocumentedException.ErrorSeverity.error, info);
+ }
+
+ private Document executeOperationWithHighestPriority(Document message, NetconfOperationExecution netconfOperationExecution, String messageAsString) throws NetconfDocumentedException {
+ logger.debug("Forwarding netconf message {} to {}", messageAsString,
netconfOperationExecution.operationWithHighestPriority);
final LinkedList<NetconfOperationFilterChain> chain = new LinkedList<>();
TreeMap<HandlingPriority, Set<NetconfOperation>> sortedPriority = getSortedNetconfOperationsWithCanHandle(
message, session);
- Preconditions.checkState(sortedPriority.isEmpty() == false, "No %s available to handle message %s",
+ Preconditions.checkArgument(sortedPriority.isEmpty() == false, "No %s available to handle message %s",
NetconfOperation.class.getName(), XmlUtil.toString(message));
HandlingPriority highestFoundPriority = sortedPriority.lastKey();
public void tearDown() throws Exception {
commitNot.close();
nettyThreadgroup.shutdownGracefully();
+ clientDispatcher.close();
}
private void loadMessages() throws IOException, SAXException, ParserConfigurationException {
+ "<no-arg xmlns=\""
+ expectedNamespace
+ "\"> "
- + "<context-instance>/data/modules/module[name='impl-netconf']/instance[name='instance']</context-instance>"
+ + "<context-instance>/modules/module[type='impl-netconf'][name='instance']</context-instance>"
+ "<arg1>argument1</arg1>" + "</no-arg>" + "</rpc>";
final Document doc = XmlUtil.readXmlToDocument(rpc);
final NetconfMessage message = netconfClient.sendMessage(new NetconfMessage(doc));
prefix = "";
}
if (namespaces.containsKey(prefix) == false) {
- throw new IllegalArgumentException("Cannot find namespace for " + element + ". Prefix from content is "
+ throw new IllegalArgumentException("Cannot find namespace for " + XmlUtil.toString(element) + ". Prefix from content is "
+ prefix + ". Found namespaces " + namespaces);
}
return Maps.immutableEntry(prefix, namespaces.get(prefix));
public static final String NAME_KEY = "name";
public static final String NOTIFICATION_ELEMENT_NAME = "notification";
+ public static final String PREFIX = "prefix";
+
//
//
public static final String RFC4741_TARGET_NAMESPACE = "urn:ietf:params:xml:ns:netconf:base:1.0";
<core-size>44</core-size>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<core-size>44</core-size>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</modules>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<core-size>44</core-size>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<core-size>44</core-size>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<core-size>44</core-size>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<core-size>44</core-size>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<no-arg xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
- <context-instance>/data/modules/module[name='impl-netconf']/instance[name='instance']</context-instance>
+ <context-instance>/modules/module[type='impl-netconf' and name='instance']</context-instance>
<arg1>
testarg1
</arg1>
<rpc message-id="a" a="64" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<noArgInner xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
<context-instance>
- /data/modules/module[name='impl-netconf']/instance[name='instance2']/inner-running-data-additional[key='randomString_1003']
+ /modules/module[name='instance2'][type='impl-netconf']/inner-running-data-additional[key='randomString_1003']
</context-instance>
</noArgInner>
</rpc>
<noArgInnerInner
xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
<context-instance>
- /data/modules/module[name='impl-netconf']/instance[name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
+ /modules/module[type='impl-netconf'][name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
</context-instance>
<arg1>
<leaf-list-output
xmlns="urn:opendaylight:params:xml:ns:yang:controller:test:impl">
<context-instance>
- /data/modules/module[name='impl-netconf']/instance[name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
+ /modules/module[type='impl-netconf'][name='instance2']/inner-running-data[key='1015']/inner-inner-running-data[key='1017']
</context-instance>
</leaf-list-output>
</rpc>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<unknownAttribute>error</unknownAttribute>
l
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
l
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<unknownAttribute>error</unknownAttribute>
<instance>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
l
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<core-size>44</core-size>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<unknownAttribute>error</unknownAttribute>
</peers>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
</type>
<name>test2</name>
<testing-dep>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<name>ref_dep</name>
</testing-dep>
</module>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
- <type>testing</type>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:test">prefix:testing</type>
<instance>
<name>ref_dep</name>
<provider>/config/modules/module[name='impl-dep']/instance[name='dep']
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>arphandler</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${arphandler.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.codehaus.enunciate</groupId>
*/
if (inPort == null
|| container.equals(GlobalConstants.DEFAULT.toString())
- || this.containerToNc.get(container).contains(inPort)) {
+ || (containerToNc.containsKey(container) && containerToNc.get(container).contains(inPort))) {
notifier.flowRemoved(node, flow);
}
}
</scm>
<artifactId>samples.loadbalancer</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
import org.apache.felix.dm.Component;
import org.opendaylight.controller.forwardingrulesmanager.FlowEntry;
import org.opendaylight.controller.forwardingrulesmanager.IForwardingRulesManager;
+import org.opendaylight.controller.hosttracker.HostIdFactory;
+import org.opendaylight.controller.hosttracker.IHostId;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
import org.opendaylight.controller.sal.action.Action;
import org.slf4j.LoggerFactory;
/**
- * This class is the main class that represents the load balancer service.
- * This is a sample load balancer application that balances traffic to backend servers
- * based on the source address and source port on each incoming packet. The service
- * reactively installs OpenFlow rules to direct all packets with a specific source address
- * and source port to one of the appropriate backend servers. The servers may be chosen
- * using a round robin policy or a random policy. This service can be configured via a
- * REST APIs which are similar to the OpenStack Quantum LBaaS (Load-balancer-as-a-Service)
- * v1.0 API proposal (http://wiki.openstack.org/Quantum/LBaaS)
+ * This class is the main class that represents the load balancer service. This
+ * is a sample load balancer application that balances traffic to backend
+ * servers based on the source address and source port on each incoming packet.
+ * The service reactively installs OpenFlow rules to direct all packets with a
+ * specific source address and source port to one of the appropriate backend
+ * servers. The servers may be chosen using a round robin policy or a random
+ * policy. This service can be configured via a REST APIs which are similar to
+ * the OpenStack Quantum LBaaS (Load-balancer-as-a-Service) v1.0 API proposal
+ * (http://wiki.openstack.org/Quantum/LBaaS)
*
- * To use this service, a virtual IP (or VIP) should be exposed to the clients of this service
- * and used as the destination address. A VIP is a entity that comprises of a virtual IP, port
- * and protocol (TCP or UDP).
- * Assumptions:
- * 1. One or more VIPs may be mapped to the same server pool. All VIPs that share the same
- * pool must also share the same load balancing policy (random or round robin).
+ * To use this service, a virtual IP (or VIP) should be exposed to the clients
+ * of this service and used as the destination address. A VIP is an entity that
+ * comprises a virtual IP, port and protocol (TCP or UDP). Assumptions: 1.
+ * One or more VIPs may be mapped to the same server pool. All VIPs that share
+ * the same pool must also share the same load balancing policy (random or round
+ * robin).
*
- * 2. Only one server pool can be be assigned to a VIP.
+ * 2. Only one server pool can be assigned to a VIP.
*
- * 3. All flow rules are installed with an idle timeout of 5 seconds.
+ * 3. All flow rules are installed with an idle timeout of 5 seconds.
*
- * 4. Packets to a VIP must leave the OpenFlow cluster from the same switch from where
- * it entered it.
+ * 4. Packets to a VIP must leave the OpenFlow cluster from the same switch from
+ * where it entered it.
*
- * 5. When you delete a VIP or a server pool or a server from a pool, the service does not
- * delete the flow rules it has already installed. The flow rules should automatically
- * time out after the idle timeout of 5 seconds.
+ * 5. When you delete a VIP or a server pool or a server from a pool, the
+ * service does not delete the flow rules it has already installed. The flow
+ * rules should automatically time out after the idle timeout of 5 seconds.
*
*/
-public class LoadBalancerService implements IListenDataPacket, IConfigManager{
+public class LoadBalancerService implements IListenDataPacket, IConfigManager {
/*
* Logger instance
private static Logger lbsLogger = LoggerFactory.getLogger(LoadBalancerService.class);
/*
- * Single instance of the configuration manager. Application passes this reference to all
- * the new policies implemented for load balancing.
+ * Single instance of the configuration manager. Application passes this
+ * reference to all the new policies implemented for load balancing.
*/
private static ConfigManager configManager = new ConfigManager();
 * Round robin policy instance. Need to implement factory patterns to get
* policy instance.
*/
- private static RoundRobinLBPolicy rrLBMethod= new RoundRobinLBPolicy(configManager);
+ private static RoundRobinLBPolicy rrLBMethod = new RoundRobinLBPolicy(configManager);
/*
* Random policy instance.
*/
- private static RandomLBPolicy ranLBMethod= new RandomLBPolicy(configManager);
+ private static RandomLBPolicy ranLBMethod = new RandomLBPolicy(configManager);
/*
* Reference to the data packet service
private String containerName = null;
/*
- * Set/unset methods for the service instance that load balancer
- * service requires
+ * Set/unset methods for the service instance that load balancer service
+ * requires
*/
public String getContainerName() {
if (containerName == null)
}
}
- public void setForwardingRulesManager(
- IForwardingRulesManager forwardingRulesManager) {
+ public void setForwardingRulesManager(IForwardingRulesManager forwardingRulesManager) {
lbsLogger.debug("Setting ForwardingRulesManager");
this.ruleManager = forwardingRulesManager;
}
- public void unsetForwardingRulesManager(
- IForwardingRulesManager forwardingRulesManager) {
+ public void unsetForwardingRulesManager(IForwardingRulesManager forwardingRulesManager) {
if (this.ruleManager == forwardingRulesManager) {
this.ruleManager = null;
}
}
/**
- * This method receives first packet of flows for which there is no
- * matching flow rule installed on the switch. IP addresses used for VIPs
- * are not supposed to be used by any real/virtual host in the network.
- * Hence, any forwarding/routing service will not install any flows rules matching
- * these VIPs. This ensures that all the flows destined for VIPs will not find a match
- * in the switch and will be forwarded to the load balancing service.
- * Service will decide where to route this traffic based on the load balancing
- * policy of the VIP's attached pool and will install appropriate flow rules
- * in a reactive manner.
+ * This method receives first packet of flows for which there is no matching
+ * flow rule installed on the switch. IP addresses used for VIPs are not
+ * supposed to be used by any real/virtual host in the network. Hence, any
+ * forwarding/routing service will not install any flows rules matching
+ * these VIPs. This ensures that all the flows destined for VIPs will not
+ * find a match in the switch and will be forwarded to the load balancing
+ * service. Service will decide where to route this traffic based on the
+ * load balancing policy of the VIP's attached pool and will install
+ * appropriate flow rules in a reactive manner.
*/
@Override
- public PacketResult receiveDataPacket(RawPacket inPkt){
+ public PacketResult receiveDataPacket(RawPacket inPkt) {
if (inPkt == null) {
return PacketResult.IGNORED;
if (ipPkt instanceof IPv4) {
- lbsLogger.debug("Packet recieved from switch : {}",inPkt.getIncomingNodeConnector().getNode().toString());
- IPv4 ipv4Pkt = (IPv4)ipPkt;
- if(IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.TCP.toString())
- || IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.UDP.toString())){
+ lbsLogger.debug("Packet recieved from switch : {}", inPkt.getIncomingNodeConnector().getNode()
+ .toString());
+ IPv4 ipv4Pkt = (IPv4) ipPkt;
+ if (IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.TCP.toString())
+ || IPProtocols.getProtocolName(ipv4Pkt.getProtocol()).equals(IPProtocols.UDP.toString())) {
- lbsLogger.debug("Packet protocol : {}",IPProtocols.getProtocolName(ipv4Pkt.getProtocol()));
+ lbsLogger.debug("Packet protocol : {}", IPProtocols.getProtocolName(ipv4Pkt.getProtocol()));
Client client = new LBUtil().getClientFromPacket(ipv4Pkt);
VIP vip = new LBUtil().getVIPFromPacket(ipv4Pkt);
- if(configManager.vipExists(vip)){
+ if (configManager.vipExists(vip)) {
VIP vipWithPoolName = configManager.getVIPWithPoolName(vip);
String poolMemberIp = null;
- if(vipWithPoolName.getPoolName() == null){
- lbsLogger.error("No pool attached. Please attach pool with the VIP -- {}",vip);
+ if (vipWithPoolName.getPoolName() == null) {
+ lbsLogger.error("No pool attached. Please attach pool with the VIP -- {}", vip);
return PacketResult.IGNORED;
}
- if(configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod().equalsIgnoreCase(LBConst.ROUND_ROBIN_LB_METHOD)){
+ if (configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod()
+ .equalsIgnoreCase(LBConst.ROUND_ROBIN_LB_METHOD)) {
- poolMemberIp = rrLBMethod.getPoolMemberForClient(client,vipWithPoolName);
+ poolMemberIp = rrLBMethod.getPoolMemberForClient(client, vipWithPoolName);
}
- if(configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod().equalsIgnoreCase(LBConst.RANDOM_LB_METHOD)){
- poolMemberIp = ranLBMethod.getPoolMemberForClient(client,vipWithPoolName);
+ if (configManager.getPool(vipWithPoolName.getPoolName()).getLbMethod()
+ .equalsIgnoreCase(LBConst.RANDOM_LB_METHOD)) {
+ poolMemberIp = ranLBMethod.getPoolMemberForClient(client, vipWithPoolName);
}
try {
Node clientNode = inPkt.getIncomingNodeConnector().getNode();
- HostNodeConnector hnConnector = this.hostTracker.hostFind(InetAddress.getByName(poolMemberIp));
+ // HostTracker hosts db key scheme implementation
+ IHostId id = HostIdFactory.create(InetAddress.getByName(poolMemberIp), null);
+ HostNodeConnector hnConnector = this.hostTracker.hostFind(id);
Node destNode = hnConnector.getnodeconnectorNode();
- lbsLogger.debug("Client is connected to switch : {}",clientNode.toString());
- lbsLogger.debug("Destination pool machine is connected to switch : {}",destNode.toString());
+ lbsLogger.debug("Client is connected to switch : {}", clientNode.toString());
+ lbsLogger
+ .debug("Destination pool machine is connected to switch : {}", destNode.toString());
- //Get path between both the nodes
+ // Get path between both the nodes
NodeConnector forwardPort = null;
- if(clientNode.getNodeIDString().equals(destNode.getNodeIDString())){
+ if (clientNode.getNodeIDString().equals(destNode.getNodeIDString())) {
forwardPort = hnConnector.getnodeConnector();
- lbsLogger.info("Both source (client) and destination pool machine is connected to same switch nodes. Respective ports are - {},{}",forwardPort,inPkt.getIncomingNodeConnector());
+ lbsLogger
+ .info("Both source (client) and destination pool machine is connected to same switch nodes. Respective ports are - {},{}",
+ forwardPort, inPkt.getIncomingNodeConnector());
- }else{
+ } else {
Path route = this.routing.getRoute(clientNode, destNode);
- lbsLogger.info("Path between source (client) and destination switch nodes : {}",route.toString());
+ lbsLogger.info("Path between source (client) and destination switch nodes : {}",
+ route.toString());
forwardPort = route.getEdges().get(0).getTailNodeConnector();
}
- if(installLoadBalancerFlow(client,
- vip,
- clientNode,
- poolMemberIp,
- hnConnector.getDataLayerAddressBytes(),
- forwardPort,
- LBConst.FORWARD_DIRECTION_LB_FLOW)){
- lbsLogger.info("Traffic from client : {} will be routed " +
- "to pool machine : {}",client,poolMemberIp);
- }else{
- lbsLogger.error("Not able to route traffic from client : {}",client );
+ if (installLoadBalancerFlow(client, vip, clientNode, poolMemberIp,
+ hnConnector.getDataLayerAddressBytes(), forwardPort,
+ LBConst.FORWARD_DIRECTION_LB_FLOW)) {
+ lbsLogger.info("Traffic from client : {} will be routed " + "to pool machine : {}",
+ client, poolMemberIp);
+ } else {
+ lbsLogger.error("Not able to route traffic from client : {}", client);
}
- if(installLoadBalancerFlow(client,
- vip,
- clientNode,
- poolMemberIp,
- vipMacAddr,
- inPkt.getIncomingNodeConnector(),
- LBConst.REVERSE_DIRECTION_LB_FLOW)){
- lbsLogger.info("Flow rule installed to change the source ip/mac from " +
- "pool machine ip {} to VIP {} for traffic coming pool machine",poolMemberIp,vip);
- }else{
- lbsLogger.error("Not able to route traffic from client : {}",client );
+ if (installLoadBalancerFlow(client, vip, clientNode, poolMemberIp, vipMacAddr,
+ inPkt.getIncomingNodeConnector(), LBConst.REVERSE_DIRECTION_LB_FLOW)) {
+ lbsLogger.info("Flow rule installed to change the source ip/mac from "
+ + "pool machine ip {} to VIP {} for traffic coming pool machine", poolMemberIp,
+ vip);
+ } else {
+ lbsLogger.error("Not able to route traffic from client : {}", client);
}
- }catch (UnknownHostException e) {
- lbsLogger.error("Pool member not found in the network : {}",e.getMessage());
- lbsLogger.error("",e);
+ } catch (UnknownHostException e) {
+ lbsLogger.error("Pool member not found in the network : {}", e.getMessage());
+ lbsLogger.error("", e);
}
}
}
}
/*
- * This method installs the flow rule for routing the traffic between two hosts.
- * @param source Traffic is sent by this source
- * @param dest Traffic is destined to this destination (VIP)
- * @param sourceSwitch Switch from where controller received the packet
- * @param destMachineIp IP address of the pool member where traffic needs to be routed
- * @param destMachineMac MAC address of the pool member where traffic needs to be routed
- * @param outport Use this port to send out traffic
- * @param flowDirection FORWARD_DIRECTION_LB_FLOW or REVERSE_DIRECTION_LB_FLOW
- * @return true If flow installation was successful
- * false else
- * @throws UnknownHostException
+ * This method installs the flow rule for routing the traffic between two
+ * hosts.
+ *
+ * @param source Traffic is sent by this source
+ *
+ * @param dest Traffic is destined to this destination (VIP)
+ *
+ * @param sourceSwitch Switch from where controller received the packet
+ *
+ * @param destMachineIp IP address of the pool member where traffic needs to
+ * be routed
+ *
+ * @param destMachineMac MAC address of the pool member where traffic needs
+ * to be routed
+ *
+ * @param outport Use this port to send out traffic
+ *
+ * @param flowDirection FORWARD_DIRECTION_LB_FLOW or
+ * REVERSE_DIRECTION_LB_FLOW
+ *
+ * @return true if flow installation was successful, false otherwise
+ *
+ * @throws UnknownHostException
*/
- private boolean installLoadBalancerFlow(Client source,
- VIP dest,
- Node sourceSwitch,
- String destMachineIp,
- byte[] destMachineMac,
- NodeConnector outport,
- int flowDirection) throws UnknownHostException{
+ private boolean installLoadBalancerFlow(Client source, VIP dest, Node sourceSwitch, String destMachineIp,
+ byte[] destMachineMac, NodeConnector outport, int flowDirection) throws UnknownHostException {
Match match = new Match();
List<Action> actions = new ArrayList<Action>();
- if(flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW){
+ if (flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW) {
match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
match.setField(MatchType.NW_SRC, InetAddress.getByName(source.getIp()));
match.setField(MatchType.NW_DST, InetAddress.getByName(dest.getIp()));
actions.add(new SetDlDst(destMachineMac));
}
- if(flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW){
+ if (flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW) {
match.setField(MatchType.DL_TYPE, EtherTypes.IPv4.shortValue());
match.setField(MatchType.NW_SRC, InetAddress.getByName(destMachineIp));
match.setField(MatchType.NW_DST, InetAddress.getByName(source.getIp()));
match.setField(MatchType.NW_PROTO, IPProtocols.getProtocolNumberByte(source.getProtocol()));
match.setField(MatchType.TP_SRC, dest.getPort());
- match.setField(MatchType.TP_DST,source.getPort());
+ match.setField(MatchType.TP_DST, source.getPort());
actions.add(new SetNwSrc(InetAddress.getByName(dest.getIp())));
actions.add(new SetDlSrc(destMachineMac));
flow.setHardTimeout((short) 0);
flow.setPriority(LB_IPSWITCH_PRIORITY);
- String policyName = source.getIp()+":"+source.getProtocol()+":"+source.getPort();
- String flowName =null;
+ String policyName = source.getIp() + ":" + source.getProtocol() + ":" + source.getPort();
+ String flowName = null;
- if(flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW){
- flowName = "["+policyName+":"+source.getIp() + ":"+dest.getIp()+"]";
+ if (flowDirection == LBConst.FORWARD_DIRECTION_LB_FLOW) {
+ flowName = "[" + policyName + ":" + source.getIp() + ":" + dest.getIp() + "]";
}
- if(flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW){
+ if (flowDirection == LBConst.REVERSE_DIRECTION_LB_FLOW) {
- flowName = "["+policyName+":"+dest.getIp() + ":"+source.getIp()+"]";
+ flowName = "[" + policyName + ":" + dest.getIp() + ":" + source.getIp() + "]";
}
FlowEntry fEntry = new FlowEntry(policyName, flowName, flow, sourceSwitch);
- lbsLogger.info("Install flow entry {} on node {}",fEntry.toString(),sourceSwitch.toString());
+ lbsLogger.info("Install flow entry {} on node {}", fEntry.toString(), sourceSwitch.toString());
- if(!this.ruleManager.checkFlowEntryConflict(fEntry)){
- if(this.ruleManager.installFlowEntry(fEntry).isSuccess()){
+ if (!this.ruleManager.checkFlowEntryConflict(fEntry)) {
+ if (this.ruleManager.installFlowEntry(fEntry).isSuccess()) {
return true;
- }else{
- lbsLogger.error("Error in installing flow entry to node : {}",sourceSwitch);
+ } else {
+ lbsLogger.error("Error in installing flow entry to node : {}", sourceSwitch);
}
- }else{
- lbsLogger.error("Conflicting flow entry exists : {}",fEntry.toString());
+ } else {
+ lbsLogger.error("Conflicting flow entry exists : {}", fEntry.toString());
}
return false;
}
this.containerName = (String) props.get("containerName");
lbsLogger.info("Running container name:" + this.containerName);
- }else {
+ } else {
// In the Global instance case the containerName is empty
this.containerName = "";
}
lbsLogger.info(configManager.toString());
+
}
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
*
*/
void destroy() {
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
*
*/
void start() {
}
/**
- * Function called by the dependency manager before the services
- * exported by the component are unregistered, this will be
- * followed by a "destroy ()" calls
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
*
*/
void stop() {
}
/*
- * All the methods below are just proxy methods to direct the REST API requests to configuration
- * manager. We need this redirection as currently, opendaylight supports only one
- * implementation of the service.
+ * All the methods below are just proxy methods to direct the REST API
+ * requests to configuration manager. We need this redirection as currently,
+ * opendaylight supports only one implementation of the service.
*/
@Override
public Set<VIP> getAllVIPs() {
}
@Override
- public boolean vipExists(String name, String ip, String protocol,
- short protocolPort, String poolName) {
+ public boolean vipExists(String name, String ip, String protocol, short protocolPort, String poolName) {
return configManager.vipExists(name, ip, protocol, protocolPort, poolName);
}
}
@Override
- public VIP createVIP(String name, String ip, String protocol,
- short protocolPort, String poolName) {
+ public VIP createVIP(String name, String ip, String protocol, short protocolPort, String poolName) {
return configManager.createVIP(name, ip, protocol, protocolPort, poolName);
}
}
@Override
- public PoolMember addPoolMember(String name,
- String memberIP,
- String poolName) {
+ public PoolMember addPoolMember(String name, String memberIP, String poolName) {
return configManager.addPoolMember(name, memberIP, poolName);
}
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>samples.loadbalancer</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${samples.loadbalancer}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>hosttracker</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>${hosttracker.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>${forwarding.staticrouting}</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
*/
public class ChannelManager implements MessageHandler
{
- private static final Logger log = Logger.getLogger(ChannelManager.class);
-
- private final ServerConnectionState server_state;
- private final TransportManager tm;
-
- private final HashMap<String, X11ServerData> x11_magic_cookies = new HashMap<String, X11ServerData>();
-
- private final List<Channel> channels = new Vector<Channel>();
- private int nextLocalChannel = 100;
- private boolean shutdown = false;
- private int globalSuccessCounter = 0;
- private int globalFailedCounter = 0;
-
- private final HashMap<Integer, RemoteForwardingData> remoteForwardings = new HashMap<Integer, RemoteForwardingData>();
-
- private final List<IChannelWorkerThread> listenerThreads = new Vector<IChannelWorkerThread>();
-
- private boolean listenerThreadsAllowed = true;
-
- /**
- * Constructor for client-mode.
- * @param tm
- */
- public ChannelManager(TransportManager tm)
- {
- this.server_state = null;
- this.tm = tm;
- tm.registerMessageHandler(this, 80, 100);
- }
-
- /**
- * Constructor for server-mode.
- * @param state
- */
- public ChannelManager(ServerConnectionState state)
- {
- this.server_state = state;
- this.tm = state.tm;
- tm.registerMessageHandler(this, 80, 100);
- }
-
- private Channel getChannel(int id)
- {
- synchronized (channels)
- {
- for (Channel c : channels)
- {
- if (c.localID == id)
- return c;
- }
- }
- return null;
- }
-
- private void removeChannel(int id)
- {
- synchronized (channels)
- {
- for (Channel c : channels)
- {
- if (c.localID == id)
- {
- channels.remove(c);
- break;
- }
- }
- }
- }
-
- private int addChannel(Channel c)
- {
- synchronized (channels)
- {
- channels.add(c);
- return nextLocalChannel++;
- }
- }
-
- private void waitUntilChannelOpen(Channel c) throws IOException
- {
- boolean wasInterrupted = false;
-
- synchronized (c)
- {
- while (c.state == Channel.STATE_OPENING)
- {
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (c.state != Channel.STATE_OPEN)
- {
- removeChannel(c.localID);
-
- String detail = c.getReasonClosed();
-
- if (detail == null)
- detail = "state: " + c.state;
-
- throw new IOException("Could not open channel (" + detail + ")");
- }
- }
-
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
-
- private void waitForGlobalSuccessOrFailure() throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (channels)
- {
- while ((globalSuccessCounter == 0) && (globalFailedCounter == 0))
- {
- if (shutdown)
- {
- throw new IOException("The connection is being shutdown");
- }
-
- try
- {
- channels.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (globalFailedCounter != 0)
- {
- throw new IOException("The server denied the request (did you enable port forwarding?)");
- }
-
- if (globalSuccessCounter == 0)
- {
- throw new IOException("Illegal state.");
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- private void waitForChannelSuccessOrFailure(Channel c) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (c)
- {
- while ((c.successCounter == 0) && (c.failedCounter == 0))
- {
- if (c.state != Channel.STATE_OPEN)
- {
- String detail = c.getReasonClosed();
-
- if (detail == null)
- detail = "state: " + c.state;
-
- throw new IOException("This SSH2 channel is not open (" + detail + ")");
- }
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (c.failedCounter != 0)
- {
- throw new IOException("The server denied the request.");
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public void registerX11Cookie(String hexFakeCookie, X11ServerData data)
- {
- synchronized (x11_magic_cookies)
- {
- x11_magic_cookies.put(hexFakeCookie, data);
- }
- }
-
- public void unRegisterX11Cookie(String hexFakeCookie, boolean killChannels)
- {
- if (hexFakeCookie == null)
- throw new IllegalStateException("hexFakeCookie may not be null");
-
- synchronized (x11_magic_cookies)
- {
- x11_magic_cookies.remove(hexFakeCookie);
- }
-
- if (killChannels == false)
- return;
-
- log.debug("Closing all X11 channels for the given fake cookie");
-
- List<Channel> channel_copy = new Vector<Channel>();
-
- synchronized (channels)
- {
- channel_copy.addAll(channels);
- }
-
- for (Channel c : channel_copy)
- {
- synchronized (c)
- {
- if (hexFakeCookie.equals(c.hexX11FakeCookie) == false)
- continue;
- }
-
- try
- {
- closeChannel(c, "Closing X11 channel since the corresponding session is closing", true);
- }
- catch (IOException ignored)
- {
- }
- }
- }
-
- public X11ServerData checkX11Cookie(String hexFakeCookie)
- {
- synchronized (x11_magic_cookies)
- {
- if (hexFakeCookie != null)
- return x11_magic_cookies.get(hexFakeCookie);
- }
- return null;
- }
-
- public void closeAllChannels()
- {
- log.debug("Closing all channels");
-
- List<Channel> channel_copy = new Vector<Channel>();
-
- synchronized (channels)
- {
- channel_copy.addAll(channels);
- }
-
- for (Channel c : channel_copy)
- {
- try
- {
- closeChannel(c, "Closing all channels", true);
- }
- catch (IOException ignored)
- {
- }
- }
- }
-
- public void closeChannel(Channel c, String reason, boolean force) throws IOException
- {
- byte msg[] = new byte[5];
-
- synchronized (c)
- {
- if (force)
- {
- c.state = Channel.STATE_CLOSED;
- c.EOF = true;
- }
-
- c.setReasonClosed(reason);
-
- msg[0] = Packets.SSH_MSG_CHANNEL_CLOSE;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
-
- c.notifyAll();
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(msg);
- c.closeMessageSent = true;
- }
-
- log.debug("Sent SSH_MSG_CHANNEL_CLOSE (channel " + c.localID + ")");
- }
-
- public void sendEOF(Channel c) throws IOException
- {
- byte[] msg = new byte[5];
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- return;
-
- msg[0] = Packets.SSH_MSG_CHANNEL_EOF;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(msg);
- }
-
-
- log.debug("Sent EOF (Channel " + c.localID + "/" + c.remoteID + ")");
- }
-
- public void sendOpenConfirmation(Channel c) throws IOException
- {
- PacketChannelOpenConfirmation pcoc = null;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPENING)
- return;
-
- c.state = Channel.STATE_OPEN;
-
- pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID, c.localWindow, c.localMaxPacketSize);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(pcoc.getPayload());
- }
- }
-
- public void sendData(Channel c, byte[] buffer, int pos, int len) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- while (len > 0)
- {
- int thislen = 0;
- byte[] msg;
-
- synchronized (c)
- {
- while (true)
- {
- if (c.state == Channel.STATE_CLOSED)
- throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
-
- if (c.state != Channel.STATE_OPEN)
- throw new ChannelClosedException("SSH channel in strange state. (" + c.state + ")");
-
- if (c.remoteWindow != 0)
- break;
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- /* len > 0, no sign extension can happen when comparing */
-
- thislen = (c.remoteWindow >= len) ? len : (int) c.remoteWindow;
-
- int estimatedMaxDataLen = c.remoteMaxPacketSize - (tm.getPacketOverheadEstimate() + 9);
-
- /* The worst case scenario =) a true bottleneck */
-
- if (estimatedMaxDataLen <= 0)
- {
- estimatedMaxDataLen = 1;
- }
-
- if (thislen > estimatedMaxDataLen)
- thislen = estimatedMaxDataLen;
-
- c.remoteWindow -= thislen;
-
- msg = new byte[1 + 8 + thislen];
-
- msg[0] = Packets.SSH_MSG_CHANNEL_DATA;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
- msg[5] = (byte) (thislen >> 24);
- msg[6] = (byte) (thislen >> 16);
- msg[7] = (byte) (thislen >> 8);
- msg[8] = (byte) (thislen);
-
- System.arraycopy(buffer, pos, msg, 9, thislen);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
-
- tm.sendMessage(msg);
- }
-
- pos += thislen;
- len -= thislen;
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public int requestGlobalForward(String bindAddress, int bindPort, String targetAddress, int targetPort)
- throws IOException
- {
- RemoteForwardingData rfd = new RemoteForwardingData();
-
- rfd.bindAddress = bindAddress;
- rfd.bindPort = bindPort;
- rfd.targetAddress = targetAddress;
- rfd.targetPort = targetPort;
-
- synchronized (remoteForwardings)
- {
- Integer key = new Integer(bindPort);
-
- if (remoteForwardings.get(key) != null)
- {
- throw new IOException("There is already a forwarding for remote port " + bindPort);
- }
-
- remoteForwardings.put(key, rfd);
- }
-
- synchronized (channels)
- {
- globalSuccessCounter = globalFailedCounter = 0;
- }
-
- PacketGlobalForwardRequest pgf = new PacketGlobalForwardRequest(true, bindAddress, bindPort);
- tm.sendMessage(pgf.getPayload());
-
- log.debug("Requesting a remote forwarding ('" + bindAddress + "', " + bindPort + ")");
-
- try
- {
- waitForGlobalSuccessOrFailure();
- }
- catch (IOException e)
- {
- synchronized (remoteForwardings)
- {
- remoteForwardings.remove(rfd);
- }
- throw e;
- }
-
- return bindPort;
- }
+ private static final Logger log = Logger.getLogger(ChannelManager.class);
+
+ private final ServerConnectionState server_state;
+ private final TransportManager tm;
+
+ private final HashMap<String, X11ServerData> x11_magic_cookies = new HashMap<String, X11ServerData>();
+
+ private final List<Channel> channels = new Vector<Channel>();
+ private int nextLocalChannel = 100;
+ private boolean shutdown = false;
+ private int globalSuccessCounter = 0;
+ private int globalFailedCounter = 0;
+
+ private final HashMap<Integer, RemoteForwardingData> remoteForwardings = new HashMap<Integer, RemoteForwardingData>();
+
+ private final List<IChannelWorkerThread> listenerThreads = new Vector<IChannelWorkerThread>();
+
+ private boolean listenerThreadsAllowed = true;
+
+ /**
+ * Constructor for client-mode.
+ * @param tm
+ */
+ public ChannelManager(TransportManager tm)
+ {
+ this.server_state = null;
+ this.tm = tm;
+ tm.registerMessageHandler(this, 80, 100);
+ }
+
+ /**
+ * Constructor for server-mode.
+ * @param state
+ */
+ public ChannelManager(ServerConnectionState state)
+ {
+ this.server_state = state;
+ this.tm = state.tm;
+ tm.registerMessageHandler(this, 80, 100);
+ }
+
+ private Channel getChannel(int id)
+ {
+ synchronized (channels)
+ {
+ for (Channel c : channels)
+ {
+ if (c.localID == id)
+ return c;
+ }
+ }
+ return null;
+ }
+
+ private void removeChannel(int id)
+ {
+ synchronized (channels)
+ {
+ for (Channel c : channels)
+ {
+ if (c.localID == id)
+ {
+ channels.remove(c);
+ break;
+ }
+ }
+ }
+ }
+
+ private int addChannel(Channel c)
+ {
+ synchronized (channels)
+ {
+ channels.add(c);
+ return nextLocalChannel++;
+ }
+ }
+
+ private void waitUntilChannelOpen(Channel c) throws IOException
+ {
+ boolean wasInterrupted = false;
+
+ synchronized (c)
+ {
+ while (c.state == Channel.STATE_OPENING)
+ {
+ try
+ {
+ c.wait();
+ }
+ catch (InterruptedException ignore)
+ {
+ wasInterrupted = true;
+ }
+ }
+
+ if (c.state != Channel.STATE_OPEN)
+ {
+ removeChannel(c.localID);
+
+ String detail = c.getReasonClosed();
+
+ if (detail == null)
+ detail = "state: " + c.state;
+
+ throw new IOException("Could not open channel (" + detail + ")");
+ }
+ }
+
+ if (wasInterrupted)
+ Thread.currentThread().interrupt();
+ }
+
+ private void waitForGlobalSuccessOrFailure() throws IOException
+ {
+ boolean wasInterrupted = false;
+
+ try
+ {
+ synchronized (channels)
+ {
+ while ((globalSuccessCounter == 0) && (globalFailedCounter == 0))
+ {
+ if (shutdown)
+ {
+ throw new IOException("The connection is being shutdown");
+ }
+
+ try
+ {
+ channels.wait();
+ }
+ catch (InterruptedException ignore)
+ {
+ wasInterrupted = true;
+ }
+ }
+
+ if (globalFailedCounter != 0)
+ {
+ throw new IOException("The server denied the request (did you enable port forwarding?)");
+ }
+
+ if (globalSuccessCounter == 0)
+ {
+ throw new IOException("Illegal state.");
+ }
+ }
+ }
+ finally
+ {
+ if (wasInterrupted)
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ private void waitForChannelSuccessOrFailure(Channel c) throws IOException
+ {
+ boolean wasInterrupted = false;
+
+ try
+ {
+ synchronized (c)
+ {
+ while ((c.successCounter == 0) && (c.failedCounter == 0))
+ {
+ if (c.state != Channel.STATE_OPEN)
+ {
+ String detail = c.getReasonClosed();
+
+ if (detail == null)
+ detail = "state: " + c.state;
+
+ throw new IOException("This SSH2 channel is not open (" + detail + ")");
+ }
+
+ try
+ {
+ c.wait();
+ }
+ catch (InterruptedException ignore)
+ {
+ wasInterrupted = true;
+ }
+ }
+
+ if (c.failedCounter != 0)
+ {
+ throw new IOException("The server denied the request.");
+ }
+ }
+ }
+ finally
+ {
+ if (wasInterrupted)
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ public void registerX11Cookie(String hexFakeCookie, X11ServerData data)
+ {
+ synchronized (x11_magic_cookies)
+ {
+ x11_magic_cookies.put(hexFakeCookie, data);
+ }
+ }
+
+ public void unRegisterX11Cookie(String hexFakeCookie, boolean killChannels)
+ {
+ if (hexFakeCookie == null)
+ throw new IllegalStateException("hexFakeCookie may not be null");
+
+ synchronized (x11_magic_cookies)
+ {
+ x11_magic_cookies.remove(hexFakeCookie);
+ }
+
+ if (killChannels == false)
+ return;
+
+ log.debug("Closing all X11 channels for the given fake cookie");
+
+ List<Channel> channel_copy = new Vector<Channel>();
+
+ synchronized (channels)
+ {
+ channel_copy.addAll(channels);
+ }
+
+ for (Channel c : channel_copy)
+ {
+ synchronized (c)
+ {
+ if (hexFakeCookie.equals(c.hexX11FakeCookie) == false)
+ continue;
+ }
+
+ try
+ {
+ closeChannel(c, "Closing X11 channel since the corresponding session is closing", true);
+ }
+ catch (IOException ignored)
+ {
+ }
+ }
+ }
+
+ public X11ServerData checkX11Cookie(String hexFakeCookie)
+ {
+ synchronized (x11_magic_cookies)
+ {
+ if (hexFakeCookie != null)
+ return x11_magic_cookies.get(hexFakeCookie);
+ }
+ return null;
+ }
+
+ public void closeAllChannels()
+ {
+ log.debug("Closing all channels");
+
+ List<Channel> channel_copy = new Vector<Channel>();
+
+ synchronized (channels)
+ {
+ channel_copy.addAll(channels);
+ }
+
+ for (Channel c : channel_copy)
+ {
+ try
+ {
+ closeChannel(c, "Closing all channels", true);
+ }
+ catch (IOException ignored)
+ {
+ }
+ }
+ }
+
+ public void closeChannel(Channel c, String reason, boolean force) throws IOException
+ {
+ byte msg[] = new byte[5];
+
+ synchronized (c)
+ {
+ if (force)
+ {
+ c.state = Channel.STATE_CLOSED;
+ c.EOF = true;
+ }
+
+ c.setReasonClosed(reason);
+
+ msg[0] = Packets.SSH_MSG_CHANNEL_CLOSE;
+ msg[1] = (byte) (c.remoteID >> 24);
+ msg[2] = (byte) (c.remoteID >> 16);
+ msg[3] = (byte) (c.remoteID >> 8);
+ msg[4] = (byte) (c.remoteID);
+
+ c.notifyAll();
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ if (c.closeMessageSent == true)
+ return;
+ tm.sendMessage(msg);
+ c.closeMessageSent = true;
+ }
+
+ log.debug("Sent SSH_MSG_CHANNEL_CLOSE (channel " + c.localID + ")");
+ }
+
+ public void sendEOF(Channel c) throws IOException
+ {
+ byte[] msg = new byte[5];
+
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPEN)
+ return;
+
+ msg[0] = Packets.SSH_MSG_CHANNEL_EOF;
+ msg[1] = (byte) (c.remoteID >> 24);
+ msg[2] = (byte) (c.remoteID >> 16);
+ msg[3] = (byte) (c.remoteID >> 8);
+ msg[4] = (byte) (c.remoteID);
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ if (c.closeMessageSent == true)
+ return;
+ tm.sendMessage(msg);
+ }
+
+
+ log.debug("Sent EOF (Channel " + c.localID + "/" + c.remoteID + ")");
+ }
+
+ public void sendOpenConfirmation(Channel c) throws IOException
+ {
+ PacketChannelOpenConfirmation pcoc = null;
+
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPENING)
+ return;
+
+ c.state = Channel.STATE_OPEN;
+
+ pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID, c.localWindow, c.localMaxPacketSize);
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ if (c.closeMessageSent == true)
+ return;
+ tm.sendMessage(pcoc.getPayload());
+ }
+ }
+
+ public void sendData(Channel c, byte[] buffer, int pos, int len) throws IOException
+ {
+ boolean wasInterrupted = false;
+
+ try
+ {
+ while (len > 0)
+ {
+ int thislen = 0;
+ byte[] msg;
+
+ synchronized (c)
+ {
+ while (true)
+ {
+ if (c.state == Channel.STATE_CLOSED)
+ throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
+
+ if (c.state != Channel.STATE_OPEN)
+ throw new ChannelClosedException("SSH channel in strange state. (" + c.state + ")");
+
+ if (c.remoteWindow != 0)
+ break;
+
+ try
+ {
+ c.wait();
+ }
+ catch (InterruptedException ignore)
+ {
+ wasInterrupted = true;
+ }
+ }
+
+ /* len > 0, no sign extension can happen when comparing */
+
+ thislen = (c.remoteWindow >= len) ? len : (int) c.remoteWindow;
+
+ int estimatedMaxDataLen = c.remoteMaxPacketSize - (tm.getPacketOverheadEstimate() + 9);
+
+ /* The worst case scenario =) a true bottleneck */
+
+ if (estimatedMaxDataLen <= 0)
+ {
+ estimatedMaxDataLen = 1;
+ }
+
+ if (thislen > estimatedMaxDataLen)
+ thislen = estimatedMaxDataLen;
+
+ c.remoteWindow -= thislen;
+
+ msg = new byte[1 + 8 + thislen];
+
+ msg[0] = Packets.SSH_MSG_CHANNEL_DATA;
+ msg[1] = (byte) (c.remoteID >> 24);
+ msg[2] = (byte) (c.remoteID >> 16);
+ msg[3] = (byte) (c.remoteID >> 8);
+ msg[4] = (byte) (c.remoteID);
+ msg[5] = (byte) (thislen >> 24);
+ msg[6] = (byte) (thislen >> 16);
+ msg[7] = (byte) (thislen >> 8);
+ msg[8] = (byte) (thislen);
+
+ System.arraycopy(buffer, pos, msg, 9, thislen);
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ if (c.closeMessageSent == true)
+ throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
+
+ tm.sendMessage(msg);
+ }
+
+ pos += thislen;
+ len -= thislen;
+ }
+ }
+ finally
+ {
+ if (wasInterrupted)
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ public int requestGlobalForward(String bindAddress, int bindPort, String targetAddress, int targetPort)
+ throws IOException
+ {
+ RemoteForwardingData rfd = new RemoteForwardingData();
+
+ rfd.bindAddress = bindAddress;
+ rfd.bindPort = bindPort;
+ rfd.targetAddress = targetAddress;
+ rfd.targetPort = targetPort;
+
+ synchronized (remoteForwardings)
+ {
+ Integer key = new Integer(bindPort);
+
+ if (remoteForwardings.get(key) != null)
+ {
+ throw new IOException("There is already a forwarding for remote port " + bindPort);
+ }
+
+ remoteForwardings.put(key, rfd);
+ }
+
+ synchronized (channels)
+ {
+ globalSuccessCounter = globalFailedCounter = 0;
+ }
+
+ PacketGlobalForwardRequest pgf = new PacketGlobalForwardRequest(true, bindAddress, bindPort);
+ tm.sendMessage(pgf.getPayload());
+
+ log.debug("Requesting a remote forwarding ('" + bindAddress + "', " + bindPort + ")");
+
+ try
+ {
+ waitForGlobalSuccessOrFailure();
+ }
+ catch (IOException e)
+ {
+ synchronized (remoteForwardings)
+ {
+ remoteForwardings.remove(rfd);
+ }
+ throw e;
+ }
+
+ return bindPort;
+ }
- public void requestCancelGlobalForward(int bindPort) throws IOException
- {
- RemoteForwardingData rfd = null;
-
- synchronized (remoteForwardings)
- {
- rfd = remoteForwardings.get(new Integer(bindPort));
-
- if (rfd == null)
- throw new IOException("Sorry, there is no known remote forwarding for remote port " + bindPort);
- }
-
- synchronized (channels)
- {
- globalSuccessCounter = globalFailedCounter = 0;
- }
-
- PacketGlobalCancelForwardRequest pgcf = new PacketGlobalCancelForwardRequest(true, rfd.bindAddress,
- rfd.bindPort);
- tm.sendMessage(pgcf.getPayload());
-
- log.debug("Requesting cancelation of remote forward ('" + rfd.bindAddress + "', " + rfd.bindPort + ")");
-
- waitForGlobalSuccessOrFailure();
-
- /* Only now we are sure that no more forwarded connections will arrive */
-
- synchronized (remoteForwardings)
- {
- remoteForwardings.remove(rfd);
- }
- }
-
- public void registerThread(IChannelWorkerThread thr) throws IOException
- {
- synchronized (listenerThreads)
- {
- if (listenerThreadsAllowed == false)
- throw new IOException("Too late, this connection is closed.");
- listenerThreads.add(thr);
- }
- }
-
- public Channel openDirectTCPIPChannel(String host_to_connect, int port_to_connect, String originator_IP_address,
- int originator_port) throws IOException
- {
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.localID = addChannel(c);
- // end of synchronized block forces writing out to main memory
- }
-
- PacketOpenDirectTCPIPChannel dtc = new PacketOpenDirectTCPIPChannel(c.localID, c.localWindow,
- c.localMaxPacketSize, host_to_connect, port_to_connect, originator_IP_address, originator_port);
-
- tm.sendMessage(dtc.getPayload());
-
- waitUntilChannelOpen(c);
-
- return c;
- }
-
- public Channel openSessionChannel() throws IOException
- {
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.localID = addChannel(c);
- // end of synchronized block forces the writing out to main memory
- }
-
- log.debug("Sending SSH_MSG_CHANNEL_OPEN (Channel " + c.localID + ")");
-
- PacketOpenSessionChannel smo = new PacketOpenSessionChannel(c.localID, c.localWindow, c.localMaxPacketSize);
- tm.sendMessage(smo.getPayload());
-
- waitUntilChannelOpen(c);
-
- return c;
- }
-
- public void requestPTY(Channel c, String term, int term_width_characters, int term_height_characters,
- int term_width_pixels, int term_height_pixels, byte[] terminal_modes) throws IOException
- {
- PacketSessionPtyRequest spr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
-
- spr = new PacketSessionPtyRequest(c.remoteID, true, term, term_width_characters, term_height_characters,
- term_width_pixels, term_height_pixels, terminal_modes);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(spr.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("PTY request failed").initCause(e);
- }
- }
-
- public void requestX11(Channel c, boolean singleConnection, String x11AuthenticationProtocol,
- String x11AuthenticationCookie, int x11ScreenNumber) throws IOException
- {
- PacketSessionX11Request psr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
-
- psr = new PacketSessionX11Request(c.remoteID, true, singleConnection, x11AuthenticationProtocol,
- x11AuthenticationCookie, x11ScreenNumber);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(psr.getPayload());
- }
-
- log.debug("Requesting X11 forwarding (Channel " + c.localID + "/" + c.remoteID + ")");
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The X11 request failed.").initCause(e);
- }
- }
-
- public void requestSubSystem(Channel c, String subSystemName) throws IOException
- {
- PacketSessionSubsystemRequest ssr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
-
- ssr = new PacketSessionSubsystemRequest(c.remoteID, true, subSystemName);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(ssr.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The subsystem request failed.").initCause(e);
- }
- }
-
- public void requestExecCommand(Channel c, String cmd) throws IOException
- {
- this.requestExecCommand(c, cmd, null);
- }
-
- /**
- * @param charsetName The charset used to convert between Java Unicode Strings and byte encodings
- */
- public void requestExecCommand(Channel c, String cmd, String charsetName) throws IOException
- {
- PacketSessionExecCommand sm;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
-
- sm = new PacketSessionExecCommand(c.remoteID, true, cmd);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(sm.getPayload(charsetName));
- }
-
- log.debug("Executing command (channel " + c.localID + ", '" + cmd + "')");
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The execute request failed.").initCause(e);
- }
- }
-
- public void requestShell(Channel c) throws IOException
- {
- PacketSessionStartShell sm;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
-
- sm = new PacketSessionStartShell(c.remoteID, true);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(sm.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The shell request failed.").initCause(e);
- }
- }
-
- public void msgChannelExtendedData(byte[] msg, int msglen) throws IOException
- {
- if (msglen <= 13)
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int dataType = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
- int len = ((msg[9] & 0xff) << 24) | ((msg[10] & 0xff) << 16) | ((msg[11] & 0xff) << 8) | (msg[12] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_EXTENDED_DATA message for non-existent channel " + id);
-
- if (dataType != Packets.SSH_EXTENDED_DATA_STDERR)
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has unknown type (" + dataType + ")");
-
- if (len != (msglen - 13))
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong len (calculated " + (msglen - 13)
- + ", got " + len + ")");
-
- log.debug("Got SSH_MSG_CHANNEL_EXTENDED_DATA (channel " + id + ", " + len + ")");
-
- synchronized (c)
- {
- if (c.state == Channel.STATE_CLOSED)
- return; // ignore
-
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Got SSH_MSG_CHANNEL_EXTENDED_DATA, but channel is not in correct state ("
- + c.state + ")");
-
- if (c.localWindow < len)
- throw new IOException("Remote sent too much data, does not fit into window.");
-
- c.localWindow -= len;
-
- System.arraycopy(msg, 13, c.stderrBuffer, c.stderrWritepos, len);
- c.stderrWritepos += len;
-
- c.notifyAll();
- }
- }
-
- /**
- * Wait until for a condition.
- *
- * @param c Channel
- * @param timeout in ms, 0 means no timeout.
- * @param condition_mask minimum event mask (at least one of the conditions must be fulfilled)
- * @return all current events
- */
- public int waitForCondition(Channel c, long timeout, int condition_mask)
- {
- boolean wasInterrupted = false;
-
- try
- {
- long end_time = 0;
- boolean end_time_set = false;
-
- synchronized (c)
- {
- while (true)
- {
- int current_cond = 0;
-
- int stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
- int stderrAvail = c.stderrWritepos - c.stderrReadpos;
-
- if (stdoutAvail > 0)
- current_cond = current_cond | ChannelCondition.STDOUT_DATA;
-
- if (stderrAvail > 0)
- current_cond = current_cond | ChannelCondition.STDERR_DATA;
-
- if (c.EOF)
- current_cond = current_cond | ChannelCondition.EOF;
-
- if (c.getExitStatus() != null)
- current_cond = current_cond | ChannelCondition.EXIT_STATUS;
-
- if (c.getExitSignal() != null)
- current_cond = current_cond | ChannelCondition.EXIT_SIGNAL;
-
- if (c.state == Channel.STATE_CLOSED)
- return current_cond | ChannelCondition.CLOSED | ChannelCondition.EOF;
-
- if ((current_cond & condition_mask) != 0)
- return current_cond;
-
- if (timeout > 0)
- {
- if (!end_time_set)
- {
- end_time = System.currentTimeMillis() + timeout;
- end_time_set = true;
- }
- else
- {
- timeout = end_time - System.currentTimeMillis();
-
- if (timeout <= 0)
- return current_cond | ChannelCondition.TIMEOUT;
- }
- }
-
- try
- {
- if (timeout > 0)
- c.wait(timeout);
- else
- c.wait();
- }
- catch (InterruptedException e)
- {
- wasInterrupted = true;
- }
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public int getAvailable(Channel c, boolean extended) throws IOException
- {
- synchronized (c)
- {
- int avail;
-
- if (extended)
- avail = c.stderrWritepos - c.stderrReadpos;
- else
- avail = c.stdoutWritepos - c.stdoutReadpos;
-
- return ((avail > 0) ? avail : (c.EOF ? -1 : 0));
- }
- }
-
- public int getChannelData(Channel c, boolean extended, byte[] target, int off, int len) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- int copylen = 0;
- int increment = 0;
- int remoteID = 0;
- int localID = 0;
-
- synchronized (c)
- {
- int stdoutAvail = 0;
- int stderrAvail = 0;
-
- while (true)
- {
- /*
- * Data available? We have to return remaining data even if the
- * channel is already closed.
- */
-
- stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
- stderrAvail = c.stderrWritepos - c.stderrReadpos;
-
- if ((!extended) && (stdoutAvail != 0))
- break;
-
- if ((extended) && (stderrAvail != 0))
- break;
-
- /* Do not wait if more data will never arrive (EOF or CLOSED) */
-
- if ((c.EOF) || (c.state != Channel.STATE_OPEN))
- return -1;
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- /* OK, there is some data. Return it. */
-
- if (!extended)
- {
- copylen = (stdoutAvail > len) ? len : stdoutAvail;
- System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, target, off, copylen);
- c.stdoutReadpos += copylen;
-
- if (c.stdoutReadpos != c.stdoutWritepos)
-
- System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, c.stdoutBuffer, 0, c.stdoutWritepos
- - c.stdoutReadpos);
-
- c.stdoutWritepos -= c.stdoutReadpos;
- c.stdoutReadpos = 0;
- }
- else
- {
- copylen = (stderrAvail > len) ? len : stderrAvail;
- System.arraycopy(c.stderrBuffer, c.stderrReadpos, target, off, copylen);
- c.stderrReadpos += copylen;
-
- if (c.stderrReadpos != c.stderrWritepos)
-
- System.arraycopy(c.stderrBuffer, c.stderrReadpos, c.stderrBuffer, 0, c.stderrWritepos
- - c.stderrReadpos);
-
- c.stderrWritepos -= c.stderrReadpos;
- c.stderrReadpos = 0;
- }
-
- if (c.state != Channel.STATE_OPEN)
- return copylen;
-
- if (c.localWindow < ((Channel.CHANNEL_BUFFER_SIZE + 1) / 2))
- {
- int minFreeSpace = Math.min(Channel.CHANNEL_BUFFER_SIZE - c.stdoutWritepos,
- Channel.CHANNEL_BUFFER_SIZE - c.stderrWritepos);
-
- increment = minFreeSpace - c.localWindow;
- c.localWindow = minFreeSpace;
- }
-
- remoteID = c.remoteID; /* read while holding the lock */
- localID = c.localID; /* read while holding the lock */
- }
+ /**
+  * Cancel a previously established remote (global) port forwarding and block
+  * until the server has confirmed the cancellation.
+  *
+  * @param bindPort the remote port whose forwarding shall be cancelled
+  * @throws IOException if no forwarding is known for the given port or the
+  *         cancel request fails
+  */
+ public void requestCancelGlobalForward(int bindPort) throws IOException
+ {
+ RemoteForwardingData rfd = null;
+
+ synchronized (remoteForwardings)
+ {
+ rfd = remoteForwardings.get(new Integer(bindPort));
+
+ if (rfd == null)
+ throw new IOException("Sorry, there is no known remote forwarding for remote port " + bindPort);
+ }
+
+ synchronized (channels)
+ {
+ // Reset the global request reply counters before sending the request.
+ globalSuccessCounter = globalFailedCounter = 0;
+ }
+
+ PacketGlobalCancelForwardRequest pgcf = new PacketGlobalCancelForwardRequest(true, rfd.bindAddress,
+ rfd.bindPort);
+ tm.sendMessage(pgcf.getPayload());
+
+ log.debug("Requesting cancelation of remote forward ('" + rfd.bindAddress + "', " + rfd.bindPort + ")");
+
+ waitForGlobalSuccessOrFailure();
+
+ /* Only now we are sure that no more forwarded connections will arrive */
+
+ synchronized (remoteForwardings)
+ {
+ // NOTE(review): the map is keyed by Integer bindPort (see the get() above),
+ // but remove() is called with the value object here - this likely leaves a
+ // stale entry behind. Confirm against the map's declared key type.
+ remoteForwardings.remove(rfd);
+ }
+ }
+
+ /**
+  * Register a listener worker thread with this connection. Registration is
+  * refused once the connection has been closed.
+  *
+  * @param thr the worker thread to register
+  * @throws IOException if the connection is already closed
+  */
+ public void registerThread(IChannelWorkerThread thr) throws IOException
+ {
+ synchronized (listenerThreads)
+ {
+ if (listenerThreadsAllowed == false)
+ throw new IOException("Too late, this connection is closed.");
+ listenerThreads.add(thr);
+ }
+ }
+
+ /**
+  * Open a "direct-tcpip" channel to the given destination and block until the
+  * server confirms (or rejects) the channel open request.
+  *
+  * @param host_to_connect destination host the server shall connect to
+  * @param port_to_connect destination port the server shall connect to
+  * @param originator_IP_address originator address reported to the server
+  * @param originator_port originator port reported to the server
+  * @return the opened channel
+  * @throws IOException if the channel could not be opened
+  */
+ public Channel openDirectTCPIPChannel(String host_to_connect, int port_to_connect, String originator_IP_address,
+ int originator_port) throws IOException
+ {
+ Channel c = new Channel(this);
+
+ synchronized (c)
+ {
+ c.localID = addChannel(c);
+ // end of synchronized block forces writing out to main memory
+ }
+
+ PacketOpenDirectTCPIPChannel dtc = new PacketOpenDirectTCPIPChannel(c.localID, c.localWindow,
+ c.localMaxPacketSize, host_to_connect, port_to_connect, originator_IP_address, originator_port);
+
+ tm.sendMessage(dtc.getPayload());
+
+ waitUntilChannelOpen(c);
+
+ return c;
+ }
+
+ /**
+  * Open a new "session" channel and block until the server confirms (or
+  * rejects) the channel open request.
+  *
+  * @return the opened channel
+  * @throws IOException if the channel could not be opened
+  */
+ public Channel openSessionChannel() throws IOException
+ {
+ Channel c = new Channel(this);
+
+ synchronized (c)
+ {
+ c.localID = addChannel(c);
+ // end of synchronized block forces the writing out to main memory
+ }
+
+ log.debug("Sending SSH_MSG_CHANNEL_OPEN (Channel " + c.localID + ")");
+
+ PacketOpenSessionChannel smo = new PacketOpenSessionChannel(c.localID, c.localWindow, c.localMaxPacketSize);
+ tm.sendMessage(smo.getPayload());
+
+ waitUntilChannelOpen(c);
+
+ return c;
+ }
+
+ /**
+  * Request a pseudo terminal for the given session channel and wait for the
+  * server's confirmation.
+  *
+  * @param c the session channel
+  * @param term terminal type (e.g. the value of the TERM environment variable)
+  * @param term_width_characters terminal width in characters
+  * @param term_height_characters terminal height in characters
+  * @param term_width_pixels terminal width in pixels
+  * @param term_height_pixels terminal height in pixels
+  * @param terminal_modes encoded terminal modes
+  * @throws IOException if the channel is not open or the request fails
+  */
+ public void requestPTY(Channel c, String term, int term_width_characters, int term_height_characters,
+ int term_width_pixels, int term_height_pixels, byte[] terminal_modes) throws IOException
+ {
+ PacketSessionPtyRequest spr;
+
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPEN)
+ throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
+
+ spr = new PacketSessionPtyRequest(c.remoteID, true, term, term_width_characters, term_height_characters,
+ term_width_pixels, term_height_pixels, terminal_modes);
+
+ // Reset the per-channel reply counters before sending the request.
+ c.successCounter = c.failedCounter = 0;
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ // Re-check under the send lock: a close message may have been sent meanwhile.
+ if (c.closeMessageSent)
+ throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
+ tm.sendMessage(spr.getPayload());
+ }
+
+ try
+ {
+ waitForChannelSuccessOrFailure(c);
+ }
+ catch (IOException e)
+ {
+ throw (IOException) new IOException("PTY request failed").initCause(e);
+ }
+ }
+
+ /**
+  * Request X11 forwarding for the given session channel and wait for the
+  * server's confirmation.
+  *
+  * @param c the session channel
+  * @param singleConnection if true, only a single X11 connection shall be forwarded
+  * @param x11AuthenticationProtocol X11 authentication protocol name
+  * @param x11AuthenticationCookie X11 authentication cookie
+  * @param x11ScreenNumber X11 screen number
+  * @throws IOException if the channel is not open or the request fails
+  */
+ public void requestX11(Channel c, boolean singleConnection, String x11AuthenticationProtocol,
+ String x11AuthenticationCookie, int x11ScreenNumber) throws IOException
+ {
+ PacketSessionX11Request psr;
+
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPEN)
+ throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
+
+ psr = new PacketSessionX11Request(c.remoteID, true, singleConnection, x11AuthenticationProtocol,
+ x11AuthenticationCookie, x11ScreenNumber);
+
+ // Reset the per-channel reply counters before sending the request.
+ c.successCounter = c.failedCounter = 0;
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ // Re-check under the send lock: a close message may have been sent meanwhile.
+ if (c.closeMessageSent)
+ throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
+ tm.sendMessage(psr.getPayload());
+ }
+
+ log.debug("Requesting X11 forwarding (Channel " + c.localID + "/" + c.remoteID + ")");
+
+ try
+ {
+ waitForChannelSuccessOrFailure(c);
+ }
+ catch (IOException e)
+ {
+ throw (IOException) new IOException("The X11 request failed.").initCause(e);
+ }
+ }
+
+ /**
+  * Request a subsystem (e.g. "sftp") on the given session channel and wait
+  * for the server's confirmation.
+  *
+  * @param c the session channel
+  * @param subSystemName name of the subsystem to start
+  * @throws IOException if the channel is not open or the request fails
+  */
+ public void requestSubSystem(Channel c, String subSystemName) throws IOException
+ {
+ PacketSessionSubsystemRequest ssr;
+
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPEN)
+ throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
+
+ ssr = new PacketSessionSubsystemRequest(c.remoteID, true, subSystemName);
+
+ // Reset the per-channel reply counters before sending the request.
+ c.successCounter = c.failedCounter = 0;
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ // Re-check under the send lock: a close message may have been sent meanwhile.
+ if (c.closeMessageSent)
+ throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
+ tm.sendMessage(ssr.getPayload());
+ }
+
+ try
+ {
+ waitForChannelSuccessOrFailure(c);
+ }
+ catch (IOException e)
+ {
+ throw (IOException) new IOException("The subsystem request failed.").initCause(e);
+ }
+ }
+
+ /**
+  * Execute a command on the given session channel, delegating to the
+  * charset-aware overload with a null charset name.
+  *
+  * @param c the session channel
+  * @param cmd the command to execute
+  * @throws IOException if the request fails
+  */
+ public void requestExecCommand(Channel c, String cmd) throws IOException
+ {
+ this.requestExecCommand(c, cmd, null);
+ }
+
+ /**
+ * Execute a command on the given session channel and wait for the server's
+ * confirmation.
+ *
+ * @param c the session channel
+ * @param cmd the command to execute
+ * @param charsetName The charset used to convert between Java Unicode Strings and byte encodings
+ */
+ public void requestExecCommand(Channel c, String cmd, String charsetName) throws IOException
+ {
+ PacketSessionExecCommand sm;
+
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPEN)
+ throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
+
+ sm = new PacketSessionExecCommand(c.remoteID, true, cmd);
+
+ // Reset the per-channel reply counters before sending the request.
+ c.successCounter = c.failedCounter = 0;
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ // Re-check under the send lock: a close message may have been sent meanwhile.
+ if (c.closeMessageSent)
+ throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
+ tm.sendMessage(sm.getPayload(charsetName));
+ }
+
+ log.debug("Executing command (channel " + c.localID + ", '" + cmd + "')");
+
+ try
+ {
+ waitForChannelSuccessOrFailure(c);
+ }
+ catch (IOException e)
+ {
+ throw (IOException) new IOException("The execute request failed.").initCause(e);
+ }
+ }
+
+ /**
+  * Start a shell on the given session channel and wait for the server's
+  * confirmation.
+  *
+  * @param c the session channel
+  * @throws IOException if the channel is not open or the request fails
+  */
+ public void requestShell(Channel c) throws IOException
+ {
+ PacketSessionStartShell sm;
+
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPEN)
+ throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
+
+ sm = new PacketSessionStartShell(c.remoteID, true);
+
+ // Reset the per-channel reply counters before sending the request.
+ c.successCounter = c.failedCounter = 0;
+ }
+
+ synchronized (c.channelSendLock)
+ {
+ // Re-check under the send lock: a close message may have been sent meanwhile.
+ if (c.closeMessageSent)
+ throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
+ tm.sendMessage(sm.getPayload());
+ }
+
+ try
+ {
+ waitForChannelSuccessOrFailure(c);
+ }
+ catch (IOException e)
+ {
+ throw (IOException) new IOException("The shell request failed.").initCause(e);
+ }
+ }
+
+ /**
+  * Handle an incoming SSH_MSG_CHANNEL_EXTENDED_DATA message. Only the stderr
+  * data type is accepted; the payload is charged against the local window,
+  * appended to the channel's stderr buffer and waiting readers are woken up.
+  *
+  * @param msg the raw message bytes
+  * @param msglen length of the message
+  * @throws IOException on malformed messages or unknown channel ids
+  */
+ public void msgChannelExtendedData(byte[] msg, int msglen) throws IOException
+ {
+ // Header is 13 bytes: packet type (1) + channel id (4) + data type (4) + length (4).
+ if (msglen <= 13)
+ throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong size (" + msglen + ")");
+
+ // Big-endian (network byte order) decoding of the header fields.
+ int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
+ int dataType = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
+ int len = ((msg[9] & 0xff) << 24) | ((msg[10] & 0xff) << 16) | ((msg[11] & 0xff) << 8) | (msg[12] & 0xff);
+
+ Channel c = getChannel(id);
+
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_EXTENDED_DATA message for non-existent channel " + id);
+
+ if (dataType != Packets.SSH_EXTENDED_DATA_STDERR)
+ throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has unknown type (" + dataType + ")");
+
+ if (len != (msglen - 13))
+ throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong len (calculated " + (msglen - 13)
+ + ", got " + len + ")");
+
+ log.debug("Got SSH_MSG_CHANNEL_EXTENDED_DATA (channel " + id + ", " + len + ")");
+
+ synchronized (c)
+ {
+ if (c.state == Channel.STATE_CLOSED)
+ return; // ignore
+
+ if (c.state != Channel.STATE_OPEN)
+ throw new IOException("Got SSH_MSG_CHANNEL_EXTENDED_DATA, but channel is not in correct state ("
+ + c.state + ")");
+
+ // Flow control: the remote side must never exceed our advertised window.
+ if (c.localWindow < len)
+ throw new IOException("Remote sent too much data, does not fit into window.");
+
+ c.localWindow -= len;
+
+ System.arraycopy(msg, 13, c.stderrBuffer, c.stderrWritepos, len);
+ c.stderrWritepos += len;
+
+ // Wake up any threads blocked in getChannelData()/waitForCondition().
+ c.notifyAll();
+ }
+ }
+
+ /**
+ * Wait until at least one of the given conditions holds for the channel.
+ *
+ * @param c Channel
+ * @param timeout in ms, 0 means no timeout.
+ * @param condition_mask minimum event mask (at least one of the conditions must be fulfilled)
+ * @return all current events
+ */
+ public int waitForCondition(Channel c, long timeout, int condition_mask)
+ {
+ boolean wasInterrupted = false;
+
+ try
+ {
+ long end_time = 0;
+ boolean end_time_set = false;
+
+ synchronized (c)
+ {
+ while (true)
+ {
+ // Assemble the currently fulfilled conditions on every iteration.
+ int current_cond = 0;
+
+ int stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
+ int stderrAvail = c.stderrWritepos - c.stderrReadpos;
+
+ if (stdoutAvail > 0)
+ current_cond = current_cond | ChannelCondition.STDOUT_DATA;
+
+ if (stderrAvail > 0)
+ current_cond = current_cond | ChannelCondition.STDERR_DATA;
+
+ if (c.EOF)
+ current_cond = current_cond | ChannelCondition.EOF;
+
+ if (c.getExitStatus() != null)
+ current_cond = current_cond | ChannelCondition.EXIT_STATUS;
+
+ if (c.getExitSignal() != null)
+ current_cond = current_cond | ChannelCondition.EXIT_SIGNAL;
+
+ // A closed channel also implies EOF.
+ if (c.state == Channel.STATE_CLOSED)
+ return current_cond | ChannelCondition.CLOSED | ChannelCondition.EOF;
+
+ if ((current_cond & condition_mask) != 0)
+ return current_cond;
+
+ if (timeout > 0)
+ {
+ if (!end_time_set)
+ {
+ end_time = System.currentTimeMillis() + timeout;
+ end_time_set = true;
+ }
+ else
+ {
+ // Re-arm the remaining timeout after a (possibly spurious) wakeup.
+ timeout = end_time - System.currentTimeMillis();
+
+ if (timeout <= 0)
+ return current_cond | ChannelCondition.TIMEOUT;
+ }
+ }
+
+ try
+ {
+ if (timeout > 0)
+ c.wait(timeout);
+ else
+ c.wait();
+ }
+ catch (InterruptedException e)
+ {
+ // Remember the interrupt; it is re-asserted in the finally block.
+ wasInterrupted = true;
+ }
+ }
+ }
+ }
+ finally
+ {
+ // Restore the interrupt status swallowed inside the wait loop.
+ if (wasInterrupted)
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ /**
+  * Return the number of bytes currently buffered for this channel.
+  *
+  * @param c the channel
+  * @param extended if true report the stderr buffer, otherwise stdout
+  * @return number of available bytes; -1 if nothing is buffered and EOF has
+  *         been received; 0 if nothing is buffered yet
+  */
+ public int getAvailable(Channel c, boolean extended) throws IOException
+ {
+ synchronized (c)
+ {
+ int avail;
+
+ if (extended)
+ avail = c.stderrWritepos - c.stderrReadpos;
+ else
+ avail = c.stdoutWritepos - c.stdoutReadpos;
+
+ return ((avail > 0) ? avail : (c.EOF ? -1 : 0));
+ }
+ }
+
+ /**
+  * Blocking read from the channel's stdout or stderr buffer. Waits until
+  * data is available, EOF is reached or the channel leaves the OPEN state,
+  * copies up to len bytes into target, compacts the buffer and - when the
+  * local window has shrunk below half the buffer size - sends a window
+  * adjust message to the peer.
+  *
+  * @param c the channel to read from
+  * @param extended if true read stderr, otherwise stdout
+  * @param target destination buffer
+  * @param off offset into the destination buffer
+  * @param len maximum number of bytes to copy
+  * @return number of bytes copied, or -1 on EOF/close with no buffered data
+  */
+ public int getChannelData(Channel c, boolean extended, byte[] target, int off, int len) throws IOException
+ {
+ boolean wasInterrupted = false;
+
+ try
+ {
+ int copylen = 0;
+ int increment = 0;
+ int remoteID = 0;
+ int localID = 0;
+
+ synchronized (c)
+ {
+ int stdoutAvail = 0;
+ int stderrAvail = 0;
+
+ while (true)
+ {
+ /*
+ * Data available? We have to return remaining data even if the
+ * channel is already closed.
+ */
+
+ stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
+ stderrAvail = c.stderrWritepos - c.stderrReadpos;
+
+ if ((!extended) && (stdoutAvail != 0))
+ break;
+
+ if ((extended) && (stderrAvail != 0))
+ break;
+
+ /* Do not wait if more data will never arrive (EOF or CLOSED) */
+
+ if ((c.EOF) || (c.state != Channel.STATE_OPEN))
+ return -1;
+
+ try
+ {
+ // Woken by notifyAll() from the message handlers; the loop
+ // re-checks the buffers, so spurious wakeups are harmless.
+ c.wait();
+ }
+ catch (InterruptedException ignore)
+ {
+ // Remember the interrupt; it is re-asserted in the finally block.
+ wasInterrupted = true;
+ }
+ }
+
+ /* OK, there is some data. Return it. */
+
+ if (!extended)
+ {
+ copylen = (stdoutAvail > len) ? len : stdoutAvail;
+ System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, target, off, copylen);
+ c.stdoutReadpos += copylen;
+
+ // Compact the buffer: shift any remaining bytes to the front.
+ if (c.stdoutReadpos != c.stdoutWritepos)
+
+ System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, c.stdoutBuffer, 0, c.stdoutWritepos
+ - c.stdoutReadpos);
+
+ c.stdoutWritepos -= c.stdoutReadpos;
+ c.stdoutReadpos = 0;
+ }
+ else
+ {
+ copylen = (stderrAvail > len) ? len : stderrAvail;
+ System.arraycopy(c.stderrBuffer, c.stderrReadpos, target, off, copylen);
+ c.stderrReadpos += copylen;
+
+ // Compact the buffer: shift any remaining bytes to the front.
+ if (c.stderrReadpos != c.stderrWritepos)
+
+ System.arraycopy(c.stderrBuffer, c.stderrReadpos, c.stderrBuffer, 0, c.stderrWritepos
+ - c.stderrReadpos);
+
+ c.stderrWritepos -= c.stderrReadpos;
+ c.stderrReadpos = 0;
+ }
+
+ if (c.state != Channel.STATE_OPEN)
+ return copylen;
+
+ // Top up the local window once it has dropped below half the buffer size.
+ if (c.localWindow < ((Channel.CHANNEL_BUFFER_SIZE + 1) / 2))
+ {
+ int minFreeSpace = Math.min(Channel.CHANNEL_BUFFER_SIZE - c.stdoutWritepos,
+ Channel.CHANNEL_BUFFER_SIZE - c.stderrWritepos);
+
+ increment = minFreeSpace - c.localWindow;
+ c.localWindow = minFreeSpace;
+ }
+
+ remoteID = c.remoteID; /* read while holding the lock */
+ localID = c.localID; /* read while holding the lock */
+ }
- /*
- * If a consumer reads stdout and stdin in parallel, we may end up with
- * sending two msgWindowAdjust messages. Luckily, it
- * does not matter in which order they arrive at the server.
- */
+ /*
+ * If a consumer reads stdout and stdin in parallel, we may end up with
+ * sending two msgWindowAdjust messages. Luckily, it
+ * does not matter in which order they arrive at the server.
+ */
- if (increment > 0)
- {
- log.debug("Sending SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + localID + ", " + increment + ")");
+ if (increment > 0)
+ {
+ log.debug("Sending SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + localID + ", " + increment + ")");
- synchronized (c.channelSendLock)
- {
- byte[] msg = c.msgWindowAdjust;
+ synchronized (c.channelSendLock)
+ {
+ byte[] msg = c.msgWindowAdjust;
- msg[0] = Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST;
- msg[1] = (byte) (remoteID >> 24);
- msg[2] = (byte) (remoteID >> 16);
- msg[3] = (byte) (remoteID >> 8);
- msg[4] = (byte) (remoteID);
- msg[5] = (byte) (increment >> 24);
- msg[6] = (byte) (increment >> 16);
- msg[7] = (byte) (increment >> 8);
- msg[8] = (byte) (increment);
+ msg[0] = Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST;
+ msg[1] = (byte) (remoteID >> 24);
+ msg[2] = (byte) (remoteID >> 16);
+ msg[3] = (byte) (remoteID >> 8);
+ msg[4] = (byte) (remoteID);
+ msg[5] = (byte) (increment >> 24);
+ msg[6] = (byte) (increment >> 16);
+ msg[7] = (byte) (increment >> 8);
+ msg[8] = (byte) (increment);
- if (c.closeMessageSent == false)
- tm.sendMessage(msg);
- }
- }
+ if (c.closeMessageSent == false)
+ tm.sendMessage(msg);
+ }
+ }
- return copylen;
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
+ return copylen;
+ }
+ finally
+ {
+ if (wasInterrupted)
+ Thread.currentThread().interrupt();
+ }
- }
+ }
- public void msgChannelData(byte[] msg, int msglen) throws IOException
- {
- if (msglen <= 9)
- throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong size (" + msglen + ")");
+ /**
+  * Handle an incoming SSH_MSG_CHANNEL_DATA message: validate the header,
+  * charge the payload against the local window, append it to the channel's
+  * stdout buffer and wake up waiting readers.
+  */
+ public void msgChannelData(byte[] msg, int msglen) throws IOException
+ {
+ // Header is 9 bytes: packet type (1) + channel id (4) + length (4).
+ if (msglen <= 9)
+ throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong size (" + msglen + ")");
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int len = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
+ int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
+ int len = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
- Channel c = getChannel(id);
+ Channel c = getChannel(id);
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_DATA message for non-existent channel " + id);
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_DATA message for non-existent channel " + id);
- if (len != (msglen - 9))
- throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong len (calculated " + (msglen - 9) + ", got "
- + len + ")");
+ if (len != (msglen - 9))
+ throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong len (calculated " + (msglen - 9) + ", got "
+ + len + ")");
- log.debug("Got SSH_MSG_CHANNEL_DATA (channel " + id + ", " + len + ")");
+ log.debug("Got SSH_MSG_CHANNEL_DATA (channel " + id + ", " + len + ")");
- synchronized (c)
- {
- if (c.state == Channel.STATE_CLOSED)
- return; // ignore
+ synchronized (c)
+ {
+ if (c.state == Channel.STATE_CLOSED)
+ return; // ignore
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Got SSH_MSG_CHANNEL_DATA, but channel is not in correct state (" + c.state + ")");
+ if (c.state != Channel.STATE_OPEN)
+ throw new IOException("Got SSH_MSG_CHANNEL_DATA, but channel is not in correct state (" + c.state + ")");
- if (c.localWindow < len)
- throw new IOException("Remote sent too much data, does not fit into window.");
+ // Flow control: the remote side must never exceed our advertised window.
+ if (c.localWindow < len)
+ throw new IOException("Remote sent too much data, does not fit into window.");
- c.localWindow -= len;
+ c.localWindow -= len;
- System.arraycopy(msg, 9, c.stdoutBuffer, c.stdoutWritepos, len);
- c.stdoutWritepos += len;
+ System.arraycopy(msg, 9, c.stdoutBuffer, c.stdoutWritepos, len);
+ c.stdoutWritepos += len;
- c.notifyAll();
- }
- }
+ // Wake up any threads blocked in getChannelData()/waitForCondition().
+ c.notifyAll();
+ }
+ }
- public void msgChannelWindowAdjust(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 9)
- throw new IOException("SSH_MSG_CHANNEL_WINDOW_ADJUST message has wrong size (" + msglen + ")");
+ /**
+  * Handle an incoming SSH_MSG_CHANNEL_WINDOW_ADJUST message: grow the remote
+  * window (clamped to 2^32 - 1) and wake up any blocked writers.
+  */
+ public void msgChannelWindowAdjust(byte[] msg, int msglen) throws IOException
+ {
+ // Fixed-size message: packet type (1) + channel id (4) + window change (4).
+ if (msglen != 9)
+ throw new IOException("SSH_MSG_CHANNEL_WINDOW_ADJUST message has wrong size (" + msglen + ")");
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int windowChange = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
+ int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
+ int windowChange = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
- Channel c = getChannel(id);
+ Channel c = getChannel(id);
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_WINDOW_ADJUST message for non-existent channel " + id);
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_WINDOW_ADJUST message for non-existent channel " + id);
- synchronized (c)
- {
- final long huge = 0xFFFFffffL; /* 2^32 - 1 */
+ synchronized (c)
+ {
+ final long huge = 0xFFFFffffL; /* 2^32 - 1 */
- c.remoteWindow += (windowChange & huge); /* avoid sign extension */
+ c.remoteWindow += (windowChange & huge); /* avoid sign extension */
- /* TODO - is this a good heuristic? */
+ /* TODO - is this a good heuristic? */
- if ((c.remoteWindow > huge))
- c.remoteWindow = huge;
+ if ((c.remoteWindow > huge))
+ c.remoteWindow = huge;
- c.notifyAll();
- }
+ // Wake up writers waiting for remote window space.
+ c.notifyAll();
+ }
- log.debug("Got SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + id + ", " + windowChange + ")");
- }
+ log.debug("Got SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + id + ", " + windowChange + ")");
+ }
- public void msgChannelOpen(byte[] msg, int msglen) throws IOException
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
+ public void msgChannelOpen(byte[] msg, int msglen) throws IOException
+ {
+ TypesReader tr = new TypesReader(msg, 0, msglen);
- tr.readByte(); // skip packet type
- String channelType = tr.readString();
- int remoteID = tr.readUINT32(); /* sender channel */
- int remoteWindow = tr.readUINT32(); /* initial window size */
- int remoteMaxPacketSize = tr.readUINT32(); /* maximum packet size */
+ tr.readByte(); // skip packet type
+ String channelType = tr.readString();
+ int remoteID = tr.readUINT32(); /* sender channel */
+ int remoteWindow = tr.readUINT32(); /* initial window size */
+ int remoteMaxPacketSize = tr.readUINT32(); /* maximum packet size */
- if ("x11".equals(channelType))
- {
- synchronized (x11_magic_cookies)
- {
- /* If we did not request X11 forwarding, then simply ignore this bogus request. */
+ if ("x11".equals(channelType))
+ {
+ synchronized (x11_magic_cookies)
+ {
+ /* If we did not request X11 forwarding, then simply ignore this bogus request. */
- if (x11_magic_cookies.size() == 0)
- {
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
- Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED, "X11 forwarding not activated", "");
+ if (x11_magic_cookies.size() == 0)
+ {
+ PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
+ Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED, "X11 forwarding not activated", "");
- tm.sendAsynchronousMessage(pcof.getPayload());
+ tm.sendAsynchronousMessage(pcof.getPayload());
- log.warning("Unexpected X11 request, denying it!");
+ log.warning("Unexpected X11 request, denying it!");
- return;
- }
- }
+ return;
+ }
+ }
- String remoteOriginatorAddress = tr.readString();
- int remoteOriginatorPort = tr.readUINT32();
+ String remoteOriginatorAddress = tr.readString();
+ int remoteOriginatorPort = tr.readUINT32();
- Channel c = new Channel(this);
+ Channel c = new Channel(this);
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* properly convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- }
+ synchronized (c)
+ {
+ c.remoteID = remoteID;
+ c.remoteWindow = remoteWindow & 0xFFFFffffL; /* properly convert UINT32 to long */
+ c.remoteMaxPacketSize = remoteMaxPacketSize;
+ c.localID = addChannel(c);
+ }
- /*
- * The open confirmation message will be sent from another thread
- */
+ /*
+ * The open confirmation message will be sent from another thread
+ */
- RemoteX11AcceptThread rxat = new RemoteX11AcceptThread(c, remoteOriginatorAddress, remoteOriginatorPort);
- rxat.setDaemon(true);
- rxat.start();
+ RemoteX11AcceptThread rxat = new RemoteX11AcceptThread(c, remoteOriginatorAddress, remoteOriginatorPort);
+ rxat.setDaemon(true);
+ rxat.start();
- return;
- }
+ return;
+ }
- if ("forwarded-tcpip".equals(channelType))
- {
- String remoteConnectedAddress = tr.readString(); /* address that was connected */
- int remoteConnectedPort = tr.readUINT32(); /* port that was connected */
- String remoteOriginatorAddress = tr.readString(); /* originator IP address */
- int remoteOriginatorPort = tr.readUINT32(); /* originator port */
+ if ("forwarded-tcpip".equals(channelType))
+ {
+ String remoteConnectedAddress = tr.readString(); /* address that was connected */
+ int remoteConnectedPort = tr.readUINT32(); /* port that was connected */
+ String remoteOriginatorAddress = tr.readString(); /* originator IP address */
+ int remoteOriginatorPort = tr.readUINT32(); /* originator port */
- RemoteForwardingData rfd = null;
+ RemoteForwardingData rfd = null;
- synchronized (remoteForwardings)
- {
- rfd = remoteForwardings.get(new Integer(remoteConnectedPort));
- }
+ synchronized (remoteForwardings)
+ {
+ rfd = remoteForwardings.get(new Integer(remoteConnectedPort));
+ }
- if (rfd == null)
- {
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
- Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
- "No thanks, unknown port in forwarded-tcpip request", "");
+ if (rfd == null)
+ {
+ PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
+ Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
+ "No thanks, unknown port in forwarded-tcpip request", "");
- /* Always try to be polite. */
+ /* Always try to be polite. */
- tm.sendAsynchronousMessage(pcof.getPayload());
+ tm.sendAsynchronousMessage(pcof.getPayload());
- log.debug("Unexpected forwarded-tcpip request, denying it!");
+ log.debug("Unexpected forwarded-tcpip request, denying it!");
- return;
- }
+ return;
+ }
- Channel c = new Channel(this);
+ Channel c = new Channel(this);
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- }
+ synchronized (c)
+ {
+ c.remoteID = remoteID;
+ c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
+ c.remoteMaxPacketSize = remoteMaxPacketSize;
+ c.localID = addChannel(c);
+ }
- /*
- * The open confirmation message will be sent from another thread.
- */
+ /*
+ * The open confirmation message will be sent from another thread.
+ */
- RemoteAcceptThread rat = new RemoteAcceptThread(c, remoteConnectedAddress, remoteConnectedPort,
- remoteOriginatorAddress, remoteOriginatorPort, rfd.targetAddress, rfd.targetPort);
-
- rat.setDaemon(true);
- rat.start();
-
- return;
- }
-
- if ((server_state != null) && ("session".equals(channelType)))
- {
- ServerConnectionCallback cb = null;
-
- synchronized (server_state)
- {
- cb = server_state.cb_conn;
- }
-
- if (cb == null)
- {
- tm.sendAsynchronousMessage(new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
- "Sessions are currently not enabled", "en").getPayload());
-
- return;
- }
-
- final Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- c.state = Channel.STATE_OPEN;
- c.ss = new ServerSessionImpl(c);
- }
-
- PacketChannelOpenConfirmation pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID,
- c.localWindow, c.localMaxPacketSize);
-
- tm.sendAsynchronousMessage(pcoc.getPayload());
-
- c.ss.sscb = cb.acceptSession(c.ss);
+ RemoteAcceptThread rat = new RemoteAcceptThread(c, remoteConnectedAddress, remoteConnectedPort,
+ remoteOriginatorAddress, remoteOriginatorPort, rfd.targetAddress, rfd.targetPort);
+
+ rat.setDaemon(true);
+ rat.start();
+
+ return;
+ }
+
+ if ((server_state != null) && ("session".equals(channelType)))
+ {
+ ServerConnectionCallback cb = null;
+
+ synchronized (server_state)
+ {
+ cb = server_state.cb_conn;
+ }
+
+ if (cb == null)
+ {
+ tm.sendAsynchronousMessage(new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
+ "Sessions are currently not enabled", "en").getPayload());
+
+ return;
+ }
+
+ final Channel c = new Channel(this);
+
+ synchronized (c)
+ {
+ c.remoteID = remoteID;
+ c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
+ c.remoteMaxPacketSize = remoteMaxPacketSize;
+ c.localID = addChannel(c);
+ c.state = Channel.STATE_OPEN;
+ c.ss = new ServerSessionImpl(c);
+ }
+
+ PacketChannelOpenConfirmation pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID,
+ c.localWindow, c.localMaxPacketSize);
+
+ tm.sendAsynchronousMessage(pcoc.getPayload());
+
+ c.ss.sscb = cb.acceptSession(c.ss);
- return;
- }
-
- /* Tell the server that we have no idea what it is talking about */
-
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_UNKNOWN_CHANNEL_TYPE,
- "Unknown channel type", "");
-
- tm.sendAsynchronousMessage(pcof.getPayload());
-
-
- log.warning("The peer tried to open an unsupported channel type (" + channelType + ")");
- }
-
- /* Starts the given runnable in a foreground (non-daemon) thread */
- private void runAsync(Runnable r)
- {
- Thread t = new Thread(r);
- t.start();
- }
-
- public void msgChannelRequest(byte[] msg, int msglen) throws IOException
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- int id = tr.readUINT32();
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_REQUEST message for non-existent channel " + id);
-
- ServerSessionImpl server_session = null;
-
- if (server_state != null)
- {
- synchronized (c)
- {
- server_session = c.ss;
- }
- }
-
- String type = tr.readString("US-ASCII");
- boolean wantReply = tr.readBoolean();
-
- log.debug("Got SSH_MSG_CHANNEL_REQUEST (channel " + id + ", '" + type + "')");
-
- if (type.equals("exit-status"))
- {
- if (wantReply != false)
- throw new IOException(
- "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-status message, 'want reply' is true");
-
- int exit_status = tr.readUINT32();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- synchronized (c)
- {
- c.exit_status = new Integer(exit_status);
- c.notifyAll();
- }
-
- log.debug("Got EXIT STATUS (channel " + id + ", status " + exit_status + ")");
-
- return;
- }
-
- if ((server_state == null) && (type.equals("exit-signal")))
- {
- if (wantReply != false)
- throw new IOException(
- "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-signal message, 'want reply' is true");
-
- String signame = tr.readString("US-ASCII");
- tr.readBoolean();
- tr.readString();
- tr.readString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- synchronized (c)
- {
- c.exit_signal = signame;
- c.notifyAll();
- }
-
- log.debug("Got EXIT SIGNAL (channel " + id + ", signal " + signame + ")");
-
- return;
- }
-
- if ((server_session != null) && (type.equals("pty-req")))
- {
- PtySettings pty = new PtySettings();
-
- pty.term = tr.readString();
- pty.term_width_characters = tr.readUINT32();
- pty.term_height_characters = tr.readUINT32();
- pty.term_width_pixels = tr.readUINT32();
- pty.term_height_pixels = tr.readUINT32();
- pty.terminal_modes = tr.readByteString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
-
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestPtyReq(server_session, pty);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("shell")))
- {
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestShell(server_session);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("exec")))
- {
- String command = tr.readString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestExec(server_session, command);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- /* We simply ignore unknown channel requests, however, if the server wants a reply,
- * then we signal that we have no idea what it is about.
- */
-
- if (wantReply)
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
-
- log.debug("Channel request '" + type + "' is not known, ignoring it");
- }
-
- public void msgChannelEOF(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_EOF message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_EOF message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.EOF = true;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_EOF (channel " + id + ")");
- }
-
- public void msgChannelClose(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_CLOSE message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_CLOSE message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("Close requested by remote");
- c.closeMessageRecv = true;
-
- removeChannel(c.localID);
-
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_CLOSE (channel " + id + ")");
- }
-
- public void msgChannelSuccess(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_SUCCESS message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_SUCCESS message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.successCounter++;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_SUCCESS (channel " + id + ")");
- }
-
- public void msgChannelFailure(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_FAILURE message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_FAILURE message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.failedCounter++;
- c.notifyAll();
- }
+ return;
+ }
+
+ /* Tell the server that we have no idea what it is talking about */
+
+ PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_UNKNOWN_CHANNEL_TYPE,
+ "Unknown channel type", "");
+
+ tm.sendAsynchronousMessage(pcof.getPayload());
+
+
+ log.warning("The peer tried to open an unsupported channel type (" + channelType + ")");
+ }
+
+ /* Starts the given runnable in a foreground (non-daemon) thread */
+ private void runAsync(Runnable r)
+ {
+ Thread t = new Thread(r);
+ t.start();
+ }
+
+ public void msgChannelRequest(byte[] msg, int msglen) throws IOException
+ {
+ TypesReader tr = new TypesReader(msg, 0, msglen);
+
+ tr.readByte(); // skip packet type
+ int id = tr.readUINT32();
+
+ Channel c = getChannel(id);
+
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_REQUEST message for non-existent channel " + id);
+
+ ServerSessionImpl server_session = null;
+
+ if (server_state != null)
+ {
+ synchronized (c)
+ {
+ server_session = c.ss;
+ }
+ }
+
+ String type = tr.readString("US-ASCII");
+ boolean wantReply = tr.readBoolean();
+
+ log.debug("Got SSH_MSG_CHANNEL_REQUEST (channel " + id + ", '" + type + "')");
+
+ if (type.equals("exit-status"))
+ {
+ if (wantReply != false)
+ throw new IOException(
+ "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-status message, 'want reply' is true");
+
+ int exit_status = tr.readUINT32();
+
+ if (tr.remain() != 0)
+ throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
+
+ synchronized (c)
+ {
+ c.exit_status = new Integer(exit_status);
+ c.notifyAll();
+ }
+
+ log.debug("Got EXIT STATUS (channel " + id + ", status " + exit_status + ")");
+
+ return;
+ }
+
+ if ((server_state == null) && (type.equals("exit-signal")))
+ {
+ if (wantReply != false)
+ throw new IOException(
+ "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-signal message, 'want reply' is true");
+
+ String signame = tr.readString("US-ASCII");
+ tr.readBoolean();
+ tr.readString();
+ tr.readString();
+
+ if (tr.remain() != 0)
+ throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
+
+ synchronized (c)
+ {
+ c.exit_signal = signame;
+ c.notifyAll();
+ }
+
+ log.debug("Got EXIT SIGNAL (channel " + id + ", signal " + signame + ")");
+
+ return;
+ }
+
+ if ((server_session != null) && (type.equals("pty-req")))
+ {
+ PtySettings pty = new PtySettings();
+
+ pty.term = tr.readString();
+ pty.term_width_characters = tr.readUINT32();
+ pty.term_height_characters = tr.readUINT32();
+ pty.term_width_pixels = tr.readUINT32();
+ pty.term_height_pixels = tr.readUINT32();
+ pty.terminal_modes = tr.readByteString();
+
+ if (tr.remain() != 0)
+ throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
+
+ Runnable run_after_sending_success = null;
+
+ ServerSessionCallback sscb = server_session.getServerSessionCallback();
+
+ if (sscb != null)
+ run_after_sending_success = sscb.requestPtyReq(server_session, pty);
+
+ if (wantReply)
+ {
+ if (run_after_sending_success != null)
+ {
+ tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
+ }
+ else
+ {
+ tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
+ }
+ }
+
+ if (run_after_sending_success != null)
+ {
+ runAsync(run_after_sending_success);
+ }
+
+ return;
+ }
+
+ if ((server_session != null) && (type.equals("subsystem")))
+ {
+ String command = tr.readString();
+ if (tr.remain() != 0)
+ throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
+
+ Runnable run_after_sending_success = null;
+ ServerSessionCallback sscb = server_session.getServerSessionCallback();
+
+ if (sscb != null)
+ run_after_sending_success = sscb.requestSubsystem(server_session, command);
+
+ if (wantReply)
+ {
+ if (run_after_sending_success != null)
+ {
+ tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
+ }
+ else
+ {
+ tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
+ }
+ }
+
+ if (run_after_sending_success != null)
+ {
+ runAsync(run_after_sending_success);
+ }
+
+ return;
+ }
+
+ if ((server_session != null) && (type.equals("shell")))
+ {
+ if (tr.remain() != 0)
+ throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
+
+ Runnable run_after_sending_success = null;
+ ServerSessionCallback sscb = server_session.getServerSessionCallback();
+
+ if (sscb != null)
+ run_after_sending_success = sscb.requestShell(server_session);
+
+ if (wantReply)
+ {
+ if (run_after_sending_success != null)
+ {
+ tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
+ }
+ else
+ {
+ tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
+ }
+ }
+
+ if (run_after_sending_success != null)
+ {
+ runAsync(run_after_sending_success);
+ }
+
+ return;
+ }
+
+ if ((server_session != null) && (type.equals("exec")))
+ {
+ String command = tr.readString();
+
+ if (tr.remain() != 0)
+ throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
+
+ Runnable run_after_sending_success = null;
+ ServerSessionCallback sscb = server_session.getServerSessionCallback();
+
+ if (sscb != null)
+ run_after_sending_success = sscb.requestExec(server_session, command);
+
+ if (wantReply)
+ {
+ if (run_after_sending_success != null)
+ {
+ tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
+ }
+ else
+ {
+ tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
+ }
+ }
+
+ if (run_after_sending_success != null)
+ {
+ runAsync(run_after_sending_success);
+ }
+
+ return;
+ }
+
+ /* We simply ignore unknown channel requests, however, if the server wants a reply,
+ * then we signal that we have no idea what it is about.
+ */
+
+ if (wantReply)
+ {
+ tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
+ }
+
+ log.debug("Channel request '" + type + "' is not known, ignoring it");
+ }
+
+ public void msgChannelEOF(byte[] msg, int msglen) throws IOException
+ {
+ if (msglen != 5)
+ throw new IOException("SSH_MSG_CHANNEL_EOF message has wrong size (" + msglen + ")");
+
+ int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
+
+ Channel c = getChannel(id);
+
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_EOF message for non-existent channel " + id);
+
+ synchronized (c)
+ {
+ c.EOF = true;
+ c.notifyAll();
+ }
+
+ log.debug("Got SSH_MSG_CHANNEL_EOF (channel " + id + ")");
+ }
+
+ public void msgChannelClose(byte[] msg, int msglen) throws IOException
+ {
+ if (msglen != 5)
+ throw new IOException("SSH_MSG_CHANNEL_CLOSE message has wrong size (" + msglen + ")");
+
+ int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
+
+ Channel c = getChannel(id);
+
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_CLOSE message for non-existent channel " + id);
+
+ synchronized (c)
+ {
+ c.EOF = true;
+ c.state = Channel.STATE_CLOSED;
+ c.setReasonClosed("Close requested by remote");
+ c.closeMessageRecv = true;
+
+ removeChannel(c.localID);
+
+ c.notifyAll();
+ }
+
+ log.debug("Got SSH_MSG_CHANNEL_CLOSE (channel " + id + ")");
+ }
+
+ public void msgChannelSuccess(byte[] msg, int msglen) throws IOException
+ {
+ if (msglen != 5)
+ throw new IOException("SSH_MSG_CHANNEL_SUCCESS message has wrong size (" + msglen + ")");
+
+ int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
+
+ Channel c = getChannel(id);
+
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_SUCCESS message for non-existent channel " + id);
+
+ synchronized (c)
+ {
+ c.successCounter++;
+ c.notifyAll();
+ }
+
+ log.debug("Got SSH_MSG_CHANNEL_SUCCESS (channel " + id + ")");
+ }
+
+ public void msgChannelFailure(byte[] msg, int msglen) throws IOException
+ {
+ if (msglen != 5)
+ throw new IOException("SSH_MSG_CHANNEL_FAILURE message has wrong size (" + msglen + ")");
+
+ int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
+
+ Channel c = getChannel(id);
+
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_FAILURE message for non-existent channel " + id);
+
+ synchronized (c)
+ {
+ c.failedCounter++;
+ c.notifyAll();
+ }
- log.debug("Got SSH_MSG_CHANNEL_FAILURE (channel " + id + ")");
- }
+ log.debug("Got SSH_MSG_CHANNEL_FAILURE (channel " + id + ")");
+ }
- public void msgChannelOpenConfirmation(byte[] msg, int msglen) throws IOException
- {
- PacketChannelOpenConfirmation sm = new PacketChannelOpenConfirmation(msg, 0, msglen);
+ public void msgChannelOpenConfirmation(byte[] msg, int msglen) throws IOException
+ {
+ PacketChannelOpenConfirmation sm = new PacketChannelOpenConfirmation(msg, 0, msglen);
- Channel c = getChannel(sm.recipientChannelID);
+ Channel c = getChannel(sm.recipientChannelID);
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for non-existent channel "
- + sm.recipientChannelID);
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for non-existent channel "
+ + sm.recipientChannelID);
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPENING)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for channel "
- + sm.recipientChannelID);
+ synchronized (c)
+ {
+ if (c.state != Channel.STATE_OPENING)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for channel "
+ + sm.recipientChannelID);
- c.remoteID = sm.senderChannelID;
- c.remoteWindow = sm.initialWindowSize & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = sm.maxPacketSize;
- c.state = Channel.STATE_OPEN;
- c.notifyAll();
- }
+ c.remoteID = sm.senderChannelID;
+ c.remoteWindow = sm.initialWindowSize & 0xFFFFffffL; /* convert UINT32 to long */
+ c.remoteMaxPacketSize = sm.maxPacketSize;
+ c.state = Channel.STATE_OPEN;
+ c.notifyAll();
+ }
- log.debug("Got SSH_MSG_CHANNEL_OPEN_CONFIRMATION (channel " + sm.recipientChannelID + " / remote: "
- + sm.senderChannelID + ")");
- }
+ log.debug("Got SSH_MSG_CHANNEL_OPEN_CONFIRMATION (channel " + sm.recipientChannelID + " / remote: "
+ + sm.senderChannelID + ")");
+ }
- public void msgChannelOpenFailure(byte[] msg, int msglen) throws IOException
- {
- if (msglen < 5)
- throw new IOException("SSH_MSG_CHANNEL_OPEN_FAILURE message has wrong size (" + msglen + ")");
-
- TypesReader tr = new TypesReader(msg, 0, msglen);
+ public void msgChannelOpenFailure(byte[] msg, int msglen) throws IOException
+ {
+ if (msglen < 5)
+ throw new IOException("SSH_MSG_CHANNEL_OPEN_FAILURE message has wrong size (" + msglen + ")");
+
+ TypesReader tr = new TypesReader(msg, 0, msglen);
- tr.readByte(); // skip packet type
- int id = tr.readUINT32(); /* sender channel */
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_FAILURE message for non-existent channel " + id);
-
- int reasonCode = tr.readUINT32();
- String description = tr.readString("UTF-8");
-
- String reasonCodeSymbolicName = null;
-
- switch (reasonCode)
- {
- case 1:
- reasonCodeSymbolicName = "SSH_OPEN_ADMINISTRATIVELY_PROHIBITED";
- break;
- case 2:
- reasonCodeSymbolicName = "SSH_OPEN_CONNECT_FAILED";
- break;
- case 3:
- reasonCodeSymbolicName = "SSH_OPEN_UNKNOWN_CHANNEL_TYPE";
- break;
- case 4:
- reasonCodeSymbolicName = "SSH_OPEN_RESOURCE_SHORTAGE";
- break;
- default:
- reasonCodeSymbolicName = "UNKNOWN REASON CODE (" + reasonCode + ")";
- }
-
- StringBuilder descriptionBuffer = new StringBuilder();
- descriptionBuffer.append(description);
-
- for (int i = 0; i < descriptionBuffer.length(); i++)
- {
- char cc = descriptionBuffer.charAt(i);
-
- if ((cc >= 32) && (cc <= 126))
- continue;
- descriptionBuffer.setCharAt(i, '\uFFFD');
- }
-
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("The server refused to open the channel (" + reasonCodeSymbolicName + ", '"
- + descriptionBuffer.toString() + "')");
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_OPEN_FAILURE (channel " + id + ")");
- }
-
- public void msgGlobalRequest(byte[] msg, int msglen) throws IOException
- {
- /* Currently we do not support any kind of global request */
-
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- String requestName = tr.readString();
- boolean wantReply = tr.readBoolean();
-
- if (wantReply)
- {
- byte[] reply_failure = new byte[1];
- reply_failure[0] = Packets.SSH_MSG_REQUEST_FAILURE;
-
- tm.sendAsynchronousMessage(reply_failure);
- }
-
- /* We do not clean up the requestName String - that is OK for debug */
-
- log.debug("Got SSH_MSG_GLOBAL_REQUEST (" + requestName + ")");
- }
-
- public void msgGlobalSuccess() throws IOException
- {
- synchronized (channels)
- {
- globalSuccessCounter++;
- channels.notifyAll();
- }
-
- log.debug("Got SSH_MSG_REQUEST_SUCCESS");
- }
-
- public void msgGlobalFailure() throws IOException
- {
- synchronized (channels)
- {
- globalFailedCounter++;
- channels.notifyAll();
- }
-
- log.debug("Got SSH_MSG_REQUEST_FAILURE");
- }
-
- public void handleMessage(byte[] msg, int msglen) throws IOException
- {
- if (msg == null)
- {
-
- log.debug("HandleMessage: got shutdown");
-
- synchronized (listenerThreads)
- {
- for (IChannelWorkerThread lat : listenerThreads)
- {
- lat.stopWorking();
- }
- listenerThreadsAllowed = false;
- }
-
- synchronized (channels)
- {
- shutdown = true;
-
- for (Channel c : channels)
- {
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("The connection is being shutdown");
- c.closeMessageRecv = true; /*
- * You never know, perhaps
- * we are waiting for a
- * pending close message
- * from the server...
- */
- c.notifyAll();
- }
- }
-
- channels.clear();
- channels.notifyAll(); /* Notify global response waiters */
- return;
- }
- }
-
- switch (msg[0])
- {
- case Packets.SSH_MSG_CHANNEL_OPEN_CONFIRMATION:
- msgChannelOpenConfirmation(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST:
- msgChannelWindowAdjust(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_DATA:
- msgChannelData(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_EXTENDED_DATA:
- msgChannelExtendedData(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_REQUEST:
- msgChannelRequest(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_EOF:
- msgChannelEOF(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_OPEN:
- msgChannelOpen(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_CLOSE:
- msgChannelClose(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_SUCCESS:
- msgChannelSuccess(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_FAILURE:
- msgChannelFailure(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_OPEN_FAILURE:
- msgChannelOpenFailure(msg, msglen);
- break;
- case Packets.SSH_MSG_GLOBAL_REQUEST:
- msgGlobalRequest(msg, msglen);
- break;
- case Packets.SSH_MSG_REQUEST_SUCCESS:
- msgGlobalSuccess();
- break;
- case Packets.SSH_MSG_REQUEST_FAILURE:
- msgGlobalFailure();
- break;
- default:
- throw new IOException("Cannot handle unknown channel message " + (msg[0] & 0xff));
- }
- }
+ tr.readByte(); // skip packet type
+ int id = tr.readUINT32(); /* sender channel */
+
+ Channel c = getChannel(id);
+
+ if (c == null)
+ throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_FAILURE message for non-existent channel " + id);
+
+ int reasonCode = tr.readUINT32();
+ String description = tr.readString("UTF-8");
+
+ String reasonCodeSymbolicName = null;
+
+ switch (reasonCode)
+ {
+ case 1:
+ reasonCodeSymbolicName = "SSH_OPEN_ADMINISTRATIVELY_PROHIBITED";
+ break;
+ case 2:
+ reasonCodeSymbolicName = "SSH_OPEN_CONNECT_FAILED";
+ break;
+ case 3:
+ reasonCodeSymbolicName = "SSH_OPEN_UNKNOWN_CHANNEL_TYPE";
+ break;
+ case 4:
+ reasonCodeSymbolicName = "SSH_OPEN_RESOURCE_SHORTAGE";
+ break;
+ default:
+ reasonCodeSymbolicName = "UNKNOWN REASON CODE (" + reasonCode + ")";
+ }
+
+ StringBuilder descriptionBuffer = new StringBuilder();
+ descriptionBuffer.append(description);
+
+ for (int i = 0; i < descriptionBuffer.length(); i++)
+ {
+ char cc = descriptionBuffer.charAt(i);
+
+ if ((cc >= 32) && (cc <= 126))
+ continue;
+ descriptionBuffer.setCharAt(i, '\uFFFD');
+ }
+
+ synchronized (c)
+ {
+ c.EOF = true;
+ c.state = Channel.STATE_CLOSED;
+ c.setReasonClosed("The server refused to open the channel (" + reasonCodeSymbolicName + ", '"
+ + descriptionBuffer.toString() + "')");
+ c.notifyAll();
+ }
+
+ log.debug("Got SSH_MSG_CHANNEL_OPEN_FAILURE (channel " + id + ")");
+ }
+
+ public void msgGlobalRequest(byte[] msg, int msglen) throws IOException
+ {
+ /* Currently we do not support any kind of global request */
+
+ TypesReader tr = new TypesReader(msg, 0, msglen);
+
+ tr.readByte(); // skip packet type
+ String requestName = tr.readString();
+ boolean wantReply = tr.readBoolean();
+
+ if (wantReply)
+ {
+ byte[] reply_failure = new byte[1];
+ reply_failure[0] = Packets.SSH_MSG_REQUEST_FAILURE;
+
+ tm.sendAsynchronousMessage(reply_failure);
+ }
+
+ /* We do not clean up the requestName String - that is OK for debug */
+
+ log.debug("Got SSH_MSG_GLOBAL_REQUEST (" + requestName + ")");
+ }
+
+ public void msgGlobalSuccess() throws IOException
+ {
+ synchronized (channels)
+ {
+ globalSuccessCounter++;
+ channels.notifyAll();
+ }
+
+ log.debug("Got SSH_MSG_REQUEST_SUCCESS");
+ }
+
+ public void msgGlobalFailure() throws IOException
+ {
+ synchronized (channels)
+ {
+ globalFailedCounter++;
+ channels.notifyAll();
+ }
+
+ log.debug("Got SSH_MSG_REQUEST_FAILURE");
+ }
+
+ public void handleMessage(byte[] msg, int msglen) throws IOException
+ {
+ if (msg == null)
+ {
+
+ log.debug("HandleMessage: got shutdown");
+
+ synchronized (listenerThreads)
+ {
+ for (IChannelWorkerThread lat : listenerThreads)
+ {
+ lat.stopWorking();
+ }
+ listenerThreadsAllowed = false;
+ }
+
+ synchronized (channels)
+ {
+ shutdown = true;
+
+ for (Channel c : channels)
+ {
+ synchronized (c)
+ {
+ c.EOF = true;
+ c.state = Channel.STATE_CLOSED;
+ c.setReasonClosed("The connection is being shutdown");
+ c.closeMessageRecv = true; /*
+ * You never know, perhaps
+ * we are waiting for a
+ * pending close message
+ * from the server...
+ */
+ c.notifyAll();
+ }
+ }
+
+ channels.clear();
+ channels.notifyAll(); /* Notify global response waiters */
+ return;
+ }
+ }
+
+ switch (msg[0])
+ {
+ case Packets.SSH_MSG_CHANNEL_OPEN_CONFIRMATION:
+ msgChannelOpenConfirmation(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST:
+ msgChannelWindowAdjust(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_DATA:
+ msgChannelData(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_EXTENDED_DATA:
+ msgChannelExtendedData(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_REQUEST:
+ msgChannelRequest(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_EOF:
+ msgChannelEOF(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_OPEN:
+ msgChannelOpen(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_CLOSE:
+ msgChannelClose(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_SUCCESS:
+ msgChannelSuccess(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_FAILURE:
+ msgChannelFailure(msg, msglen);
+ break;
+ case Packets.SSH_MSG_CHANNEL_OPEN_FAILURE:
+ msgChannelOpenFailure(msg, msglen);
+ break;
+ case Packets.SSH_MSG_GLOBAL_REQUEST:
+ msgGlobalRequest(msg, msglen);
+ break;
+ case Packets.SSH_MSG_REQUEST_SUCCESS:
+ msgGlobalSuccess();
+ break;
+ case Packets.SSH_MSG_REQUEST_FAILURE:
+ msgGlobalFailure();
+ break;
+ default:
+ throw new IOException("Cannot handle unknown channel message " + (msg[0] & 0xff));
+ }
+ }
}