<module name="AvoidStarImport"/>\r
<module name="UpperEll"/>\r
<module name="EmptyStatement"/>\r
+ <module name="EqualsHashCode"/>\r
</module>\r
\r
</module>\r
<artifactId>org.apache.catalina.filters.CorsFilter</artifactId>
<version>7.0.42</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.thirdparty</groupId>
+ <artifactId>ganymed</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ </dependency>
<!-- yang model dependencies -->
<dependency>
<groupId>org.opendaylight.yangtools.model</groupId>
import javax.management.ObjectName;
import org.opendaylight.controller.config.api.annotations.AbstractServiceInterface;
+import org.opendaylight.yangtools.concepts.Identifiable;
/**
* Each new {@link org.opendaylight.controller.config.spi.Module} can receive
*
* @see org.opendaylight.controller.config.spi.Module
*/
-public interface DependencyResolver {
+public interface DependencyResolver extends Identifiable<ModuleIdentifier> {
/**
* To be used during validation phase to validate service interface of
for (ModuleFactory moduleFactory : toBeAdded) {
Set<? extends Module> defaultModules = moduleFactory.getDefaultModules(dependencyResolverManager, bundleContext);
for (Module module : defaultModules) {
+ // ensure default module to be registered to jmx even if its module factory does not use dependencyResolverFactory
+ DependencyResolver dependencyResolver = dependencyResolverManager.getOrCreate(module.getIdentifier());
try {
- putConfigBeanToJMXAndInternalMaps(module.getIdentifier(), module, moduleFactory, null);
+ putConfigBeanToJMXAndInternalMaps(module.getIdentifier(), module, moduleFactory, null, dependencyResolver);
} catch (InstanceAlreadyExistsException e) {
throw new IllegalStateException(e);
}
"Error while copying old configuration from %s to %s",
oldConfigBeanInfo, moduleFactory), e);
}
- putConfigBeanToJMXAndInternalMaps(moduleIdentifier, module, moduleFactory, oldConfigBeanInfo);
+ putConfigBeanToJMXAndInternalMaps(moduleIdentifier, module, moduleFactory, oldConfigBeanInfo, dependencyResolver);
}
@Override
DependencyResolver dependencyResolver = dependencyResolverManager.getOrCreate(moduleIdentifier);
Module module = moduleFactory.createModule(instanceName, dependencyResolver, bundleContext);
return putConfigBeanToJMXAndInternalMaps(moduleIdentifier, module,
- moduleFactory, null);
+ moduleFactory, null, dependencyResolver);
}
private synchronized ObjectName putConfigBeanToJMXAndInternalMaps(
ModuleIdentifier moduleIdentifier, Module module,
ModuleFactory moduleFactory,
- @Nullable ModuleInternalInfo maybeOldConfigBeanInfo)
+ @Nullable ModuleInternalInfo maybeOldConfigBeanInfo, DependencyResolver dependencyResolver)
throws InstanceAlreadyExistsException {
+
logger.debug("Adding module {} to transaction {}", moduleIdentifier, this);
if (moduleIdentifier.equals(module.getIdentifier())==false) {
throw new IllegalStateException("Incorrect name reported by module. Expected "
+ moduleIdentifier + ", got " + module.getIdentifier());
}
+ if (dependencyResolver.getIdentifier().equals(moduleIdentifier) == false ) {
+ throw new IllegalStateException("Incorrect name reported by dependency resolver. Expected "
+ + moduleIdentifier + ", got " + dependencyResolver.getIdentifier());
+ }
DynamicMBean writableDynamicWrapper = new DynamicWritableWrapper(
module, moduleIdentifier, transactionIdentifier,
readOnlyAtomicBoolean, transactionsMBeanServer,
maybeOldConfigBeanInfo, transactionModuleJMXRegistration);
dependencyResolverManager.put(moduleInternalTransactionalInfo);
- // ensure default module to be registered to jmx even if its module factory does not use dependencyResolverFactory
- dependencyResolverManager.getOrCreate(moduleIdentifier);
return writableON;
}
*/
package org.opendaylight.controller.config.manager.impl.dependencyresolver;
-import static java.lang.String.format;
-
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.Set;
-
-import javax.annotation.concurrent.GuardedBy;
-import javax.management.ObjectName;
-
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.JmxAttribute;
import org.opendaylight.controller.config.api.JmxAttributeValidationException;
import org.opendaylight.controller.config.manager.impl.TransactionStatus;
import org.opendaylight.controller.config.spi.Module;
import org.opendaylight.controller.config.spi.ModuleFactory;
-import org.opendaylight.yangtools.concepts.Identifiable;
+
+import javax.annotation.concurrent.GuardedBy;
+import javax.management.ObjectName;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+import static java.lang.String.format;
/**
* Protect {@link org.opendaylight.controller.config.spi.Module#getInstance()}
* during validation. Tracks dependencies for ordering purposes.
*/
final class DependencyResolverImpl implements DependencyResolver,
- Identifiable<ModuleIdentifier>, Comparable<DependencyResolverImpl> {
+ Comparable<DependencyResolverImpl> {
private final ModulesHolder modulesHolder;
private final ModuleIdentifier name;
private final TransactionStatus transactionStatus;
this.modulesHolder = modulesHolder;
}
- @Deprecated
- public ModuleIdentifier getName() {
- return name;
- }
-
/**
* {@inheritDoc}
*/
int maxDepth = 0;
LinkedHashSet<ModuleIdentifier> chainForDetectingCycles2 = new LinkedHashSet<>(
chainForDetectingCycles);
- chainForDetectingCycles2.add(impl.getName());
+ chainForDetectingCycles2.add(impl.getIdentifier());
for (ModuleIdentifier dependencyName : impl.dependencies) {
DependencyResolverImpl dependentDRI = manager
.getOrCreate(dependencyName);
*/
package org.opendaylight.controller.config.manager.impl.dependencyresolver;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.annotation.concurrent.GuardedBy;
-import javax.management.InstanceAlreadyExistsException;
-
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.DependencyResolverFactory;
import org.opendaylight.controller.config.api.JmxAttribute;
import org.opendaylight.controller.config.spi.Module;
import org.opendaylight.controller.config.spi.ModuleFactory;
+import javax.annotation.concurrent.GuardedBy;
+import javax.management.InstanceAlreadyExistsException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
/**
* Holds information about modules being created and destroyed within this
* transaction. Observes usage of DependencyResolver within modules to figure
List<ModuleIdentifier> result = new ArrayList<>(
moduleIdentifiersToDependencyResolverMap.size());
for (DependencyResolverImpl dri : getAllSorted()) {
- ModuleIdentifier driName = dri.getName();
+ ModuleIdentifier driName = dri.getIdentifier();
result.add(driName);
}
return result;
*/
package org.opendaylight.controller.config.manager.impl.dependencyresolver;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.annotation.concurrent.GuardedBy;
-import javax.management.InstanceAlreadyExistsException;
-
import org.opendaylight.controller.config.api.JmxAttribute;
import org.opendaylight.controller.config.api.JmxAttributeValidationException;
import org.opendaylight.controller.config.api.ModuleIdentifier;
import org.opendaylight.controller.config.spi.Module;
import org.opendaylight.controller.config.spi.ModuleFactory;
+import javax.annotation.concurrent.GuardedBy;
+import javax.management.InstanceAlreadyExistsException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
/**
* Represents modules to be committed.
*/
throws InstanceAlreadyExistsException {
if (commitMap.containsKey(moduleIdentifier)) {
throw new InstanceAlreadyExistsException(
- "There is an instance registered with name "
- + moduleIdentifier);
+ "There is an instance registered with name " + moduleIdentifier);
}
}
*/
package org.opendaylight.controller.config.manager.impl.dependencyresolver;
-import java.util.Map;
-
-import javax.management.InstanceAlreadyExistsException;
-
import org.opendaylight.controller.config.api.JmxAttribute;
import org.opendaylight.controller.config.api.ModuleIdentifier;
import org.opendaylight.controller.config.manager.impl.CommitInfo;
import org.opendaylight.controller.config.spi.Module;
import org.opendaylight.controller.config.spi.ModuleFactory;
+import javax.management.InstanceAlreadyExistsException;
+import java.util.Map;
+
interface TransactionHolder {
CommitInfo toCommitInfo();
<version>2.4</version>
</dependency>
- <dependency>
+ <dependency>
<groupId>org.opendaylight.yangtools.thirdparty</groupId>
<artifactId>antlr4-runtime-osgi-nohead</artifactId>
<version>4.0</version>
<artifactId>yang-model-api</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>yang-ext</artifactId>
- </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.thirdparty</groupId>
+ <artifactId>ganymed</artifactId>
+ </dependency>
</dependencies>
</profile>
</profiles>
<Import-Package>
org.opendaylight.controller.sal.binding.api,
org.opendaylight.controller.sal.binding.api.data,
- org.opendaylight.controller.md.sal.common.api.data,
- org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev130819.flow,
+ org.opendaylight.controller.md.sal.common.api.data,
+ org.opendaylight.controller.sal.utils,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.groups,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets.bucket,
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819,
+ org.opendaylight.controller.clustering.services, org.opendaylight.controller.sal.core,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction,
+ org.opendaylight.controller.switchmanager,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list,
+ org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024,
org.opendaylight.yangtools.concepts,
org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819,
org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819,
package org.opendaylight.controller.forwardingrulesmanager_mdsal.consumer.impl;
+import org.eclipse.osgi.framework.console.CommandProvider;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.controller.sal.binding.api.NotificationService;
import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
+import org.opendaylight.controller.sal.core.IContainer;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+import org.opendaylight.controller.switchmanager.ISwitchManager;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.FrameworkUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class FRMConsumerImpl extends AbstractBindingAwareProvider {
+public class FRMConsumerImpl extends AbstractBindingAwareProvider implements CommandProvider{
protected static final Logger logger = LoggerFactory.getLogger(FRMConsumerImpl.class);
private static ProviderContext p_session;
private static DataBrokerService dataBrokerService;
private GroupConsumerImpl groupImplRef;
private static DataProviderService dataProviderService;
+ private static IClusterContainerServices clusterContainerService = null;
+ private static ISwitchManager switchManager;
+ private static IContainer container;
+
@Override
public void onSessionInitiated(ProviderContext session) {
FRMConsumerImpl.p_session = session;
+ if (!getDependentModule()) {
+ logger.error("Unable to fetch handlers for dependent modules");
+ System.out.println("Unable to fetch handlers for dependent modules");
+ return;
+ }
+
if (null != session) {
notificationService = session.getSALService(NotificationService.class);
if (null != dataProviderService) {
flowImplRef = new FlowConsumerImpl();
- groupImplRef = new GroupConsumerImpl();
+ // groupImplRef = new GroupConsumerImpl();
+ registerWithOSGIConsole();
}
else {
logger.error("Data Provider Service is down or NULL. " +
System.out.println("Consumer session is NULL. Please check if provider is registered");
}
+ }
+
+ public static IClusterContainerServices getClusterContainerService() {
+ return clusterContainerService;
+ }
+
+ public static void setClusterContainerService(
+ IClusterContainerServices clusterContainerService) {
+ FRMConsumerImpl.clusterContainerService = clusterContainerService;
+ }
+
+ public static ISwitchManager getSwitchManager() {
+ return switchManager;
+ }
+
+ public static void setSwitchManager(ISwitchManager switchManager) {
+ FRMConsumerImpl.switchManager = switchManager;
+ }
+
+ public static IContainer getContainer() {
+ return container;
}
- public static DataProviderService getDataProviderService() {
+ public static void setContainer(IContainer container) {
+ FRMConsumerImpl.container = container;
+ }
+
+ private void registerWithOSGIConsole() {
+ BundleContext bundleContext = FrameworkUtil.getBundle(this.getClass()).getBundleContext();
+ bundleContext.registerService(CommandProvider.class.getName(), this, null);
+ }
+
+ private boolean getDependentModule() {
+ do {
+ clusterContainerService = (IClusterContainerServices) ServiceHelper.getGlobalInstance(IClusterContainerServices.class, this);
+ try {
+ Thread.sleep(4);
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ } while(clusterContainerService == null);
+
+ do {
+
+
+ container = (IContainer) ServiceHelper.getGlobalInstance(IContainer.class, this);
+ try {
+ Thread.sleep(5);
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ } while (container == null);
+
+ do {
+ switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, container.getName(), this);
+ try {
+ Thread.sleep(5);
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ } while(null == switchManager);
+ return true;
+ }
+
+
+
+ public static DataProviderService getDataProviderService() {
return dataProviderService;
}
public FlowConsumerImpl getFlowImplRef() {
- return flowImplRef;
+ return flowImplRef;
}
public GroupConsumerImpl getGroupImplRef() {
- return groupImplRef;
+ return groupImplRef;
}
public static ProviderContext getProviderSession() {
public static DataBrokerService getDataBrokerService() {
return dataBrokerService;
}
+
+ /*
+ * OSGI COMMANDS
+ */
+ @Override
+ public String getHelp() {
+ StringBuffer help = new StringBuffer();
+ return help.toString();
+ }
}
--- /dev/null
+package org.opendaylight.controller.forwardingrulesmanager_mdsal.consumer.impl;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets.bucket.Actions;
+
+public class FRMUtil {
+ private static final String NAMEREGEX = "^[a-zA-Z0-9]+$";
+ public enum operation {ADD, DELETE, UPDATE, GET};
+
+
+ public static boolean isNameValid(String name) {
+
+ // Name validation
+ if (name == null || name.trim().isEmpty() || !name.matches(NAMEREGEX)) {
+ return false;
+ }
+ return true;
+
+ }
+
+ public static boolean areActionsValid(Actions actions) {
+ // List<Action> actionList;
+ // Action actionRef;
+ // if (null != actions && null != actions.getAction()) {
+ // actionList = actions.getAction();
+
+
+
+
+ // }
+
+ return true;
+ }
+}
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.Flows;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
+
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowAdded;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeFlow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
}
listener = new FlowDataListener();
+
if (null == FRMConsumerImpl.getDataBrokerService().registerDataChangeListener(path, listener)) {
logger.error("Failed to listen on flow data modifcation events");
System.out.println("Consumer SAL Service is down or NULL.");
System.out.println("Consumer SAL Service is down or NULL.");
return;
}
- addFlowTest();
+ //addFlowTest();
System.out.println("-------------------------------------------------------------------");
allocateCaches();
commitHandler = new FlowDataCommitHandler();
private void addFlow(InstanceIdentifier<?> path, Flow dataObject) {
AddFlowInputBuilder input = new AddFlowInputBuilder();
+ List<Instruction> inst = (dataObject).getInstructions().getInstruction();
input.setNode((dataObject).getNode());
input.setPriority((dataObject).getPriority());
input.setMatch((dataObject).getMatch());
input.setCookie((dataObject).getCookie());
- input.setAction((dataObject).getAction());
+ input.setInstructions((dataObject).getInstructions());
+ dataObject.getMatch().getLayer3Match()
+ for (int i=0;i<inst.size();i++) {
+ System.out.println("i = "+ i + inst.get(i).getInstruction().toString());
+ System.out.println("i = "+ i + inst.get(i).toString());
+ }
+
+ System.out.println("Instruction list" + (dataObject).getInstructions().getInstruction().toString());
// We send flow to the southbound plugin
flowService.addFlow(input.build());
private void commitToPlugin(internalTransaction transaction) {
for(Entry<InstanceIdentifier<?>, Flow> entry :transaction.additions.entrySet()) {
+ System.out.println("Coming add cc in FlowDatacommitHandler");
addFlow(entry.getKey(),entry.getValue());
}
- for(@SuppressWarnings("unused") Entry<InstanceIdentifier<?>, Flow> entry :transaction.additions.entrySet()) {
+ for(@SuppressWarnings("unused") Entry<InstanceIdentifier<?>, Flow> entry :transaction.updates.entrySet()) {
+ System.out.println("Coming update cc in FlowDatacommitHandler");
// updateFlow(entry.getKey(),entry.getValue());
}
Flow original = originalSwView.get(key);
if (original != null) {
// It is update for us
+ System.out.println("Coming update in FlowDatacommitHandler");
updates.put(key, flow);
} else {
// It is addition for us
+ System.out.println("Coming add in FlowDatacommitHandler");
additions.put(key, flow);
}
}
for (DataObject dataObject : additions) {
if (dataObject instanceof NodeFlow) {
NodeRef nodeOne = createNodeRef("foo:node:1");
- // validating the dataObject here
+ // validating the dataObject here
AddFlowInputBuilder input = new AddFlowInputBuilder();
input.setNode(((NodeFlow) dataObject).getNode());
input.setNode(nodeOne);
return new NodeRef(path);
}
- /* private void loadFlowData() {
- DataModification modification = (DataModification) dataservice.beginTransaction();
- String id = "abc";
- FlowKey key = new FlowKey(id, new NodeRef());
- InstanceIdentifier<?> path1;
- FlowBuilder flow = new FlowBuilder();
- flow.setKey(key);
- path1 = InstanceIdentifier.builder().node(Flows.class).node(Flow.class, key).toInstance();
- DataObject cls = (DataObject) modification.readConfigurationData(path);
- modification.putConfigurationData(path, flow.build());
- modification.commit();
- }*/
}
package org.opendaylight.controller.forwardingrulesmanager_mdsal.consumer.impl;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
+import org.opendaylight.controller.md.sal.common.api.data.DataModification;
+import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
+import org.opendaylight.controller.sal.common.util.Rpcs;
+import org.opendaylight.controller.sal.core.IContainer;
+import org.opendaylight.controller.sal.core.Node;
+import org.opendaylight.controller.sal.utils.GlobalConstants;
+import org.opendaylight.controller.sal.utils.Status;
+import org.opendaylight.controller.sal.utils.StatusCode;
+import org.opendaylight.controller.switchmanager.ISwitchManager;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.Groups;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.groups.GroupKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.GroupAdded;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.GroupRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.GroupUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupTypes.GroupType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.Buckets;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets.Bucket;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings("unused")
public class GroupConsumerImpl {
- public GroupConsumerImpl() {
-
+
+ protected static final Logger logger = LoggerFactory.getLogger(GroupConsumerImpl.class);
+ private GroupEventListener groupEventListener = new GroupEventListener();
+ private Registration<NotificationListener> groupListener;
+ private SalGroupService groupService;
+ private GroupDataCommitHandler commitHandler;
+
+ private ConcurrentMap<GroupKey, Group> originalSwGroupView;
+ private ConcurrentMap<GroupKey, Group> installedSwGroupView;
+
+ private ConcurrentMap<Node, List<Group>> nodeGroups;
+ private ConcurrentMap<GroupKey, Group> inactiveGroups;
+
+ private IClusterContainerServices clusterGroupContainerService = null;
+ private ISwitchManager switchGroupManager;
+ private IContainer container;
+
+ public GroupConsumerImpl() {
+ InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Groups.class).toInstance();
+ groupService = FRMConsumerImpl.getProviderSession().getRpcService(SalGroupService.class);
+
+ clusterGroupContainerService = FRMConsumerImpl.getClusterContainerService();
+ switchGroupManager = FRMConsumerImpl.getSwitchManager();
+ container = FRMConsumerImpl.getContainer();
+
+ if (!(cacheStartup())) {
+ logger.error("Unanle to allocate/retrieve group cache");
+ System.out.println("Unable to allocate/retrieve group cache");
+ }
+
+ if (null == groupService) {
+ logger.error("Consumer SAL Group Service is down or NULL. FRM may not function as intended");
+ System.out.println("Consumer SAL Group Service is down or NULL.");
+ return;
+ }
+
+ // For switch events
+ groupListener = FRMConsumerImpl.getNotificationService().registerNotificationListener(groupEventListener);
+
+ if (null == groupListener) {
+ logger.error("Listener to listen on group data modifcation events");
+ System.out.println("Listener to listen on group data modifcation events.");
+ return;
+ }
+
+ commitHandler = new GroupDataCommitHandler();
+ FRMConsumerImpl.getDataProviderService().registerCommitHandler(path, commitHandler);
}
+
+ private boolean allocateGroupCaches() {
+ if (this.clusterGroupContainerService == null) {
+ logger.warn("Group: Un-initialized clusterGroupContainerService, can't create cache");
+ return false;
+ }
+
+ try {
+ clusterGroupContainerService.createCache("frm.originalSwGroupView",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+
+ clusterGroupContainerService.createCache("frm.installedSwGroupView",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+
+ clusterGroupContainerService.createCache("frm.inactiveGroups",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+
+ clusterGroupContainerService.createCache("frm.nodeGroups",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+
+//TODO for cluster mode
+ /* clusterGroupContainerService.createCache(WORK_STATUS_CACHE,
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL, IClusterServices.cacheMode.ASYNC));
+
+ clusterGroupContainerService.createCache(WORK_ORDER_CACHE,
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL, IClusterServices.cacheMode.ASYNC));*/
+
+ } catch (CacheConfigException cce) {
+ logger.error("Group CacheConfigException");
+ return false;
+
+ } catch (CacheExistException cce) {
+ logger.error(" Group CacheExistException");
+ }
+
+ return true;
+ }
+
+ private void nonClusterGroupObjectCreate() {
+ originalSwGroupView = new ConcurrentHashMap<GroupKey, Group>();
+ installedSwGroupView = new ConcurrentHashMap<GroupKey, Group>();
+ nodeGroups = new ConcurrentHashMap<Node, List<Group>>();
+ inactiveGroups = new ConcurrentHashMap<GroupKey, Group>();
+ }
+
+ @SuppressWarnings({ "unchecked" })
+ private boolean retrieveGroupCaches() {
+ ConcurrentMap<?, ?> map;
+
+ if (this.clusterGroupContainerService == null) {
+ logger.warn("Group: un-initialized clusterGroupContainerService, can't retrieve cache");
+ nonClusterGroupObjectCreate();
+ return false;
+ }
+
+ map = clusterGroupContainerService.getCache("frm.originalSwGroupView");
+ if (map != null) {
+ originalSwGroupView = (ConcurrentMap<GroupKey, Group>) map;
+ } else {
+ logger.error("Retrieval of cache(originalSwGroupView) failed");
+ return false;
+ }
+
+ map = clusterGroupContainerService.getCache("frm.installedSwGroupView");
+ if (map != null) {
+ installedSwGroupView = (ConcurrentMap<GroupKey, Group>) map;
+ } else {
+ logger.error("Retrieval of cache(installedSwGroupView) failed");
+ return false;
+ }
+
+ map = clusterGroupContainerService.getCache("frm.inactiveGroups");
+ if (map != null) {
+ inactiveGroups = (ConcurrentMap<GroupKey, Group>) map;
+ } else {
+ logger.error("Retrieval of cache(inactiveGroups) failed");
+ return false;
+ }
+
+ map = clusterGroupContainerService.getCache("frm.nodeGroups");
+ if (map != null) {
+ nodeGroups = (ConcurrentMap<Node, List<Group>>) map;
+ } else {
+ logger.error("Retrieval of cache(nodeGroup) failed");
+ return false;
+ }
+
+ return true;
+ }
+
+ private boolean cacheStartup() {
+ if (allocateGroupCaches()) {
+ if (retrieveGroupCaches()) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ public Status validateGroup(Group group, FRMUtil.operation operation) {
+ String containerName;
+ String groupName;
+ Iterator<Bucket> bucketIterator;
+ boolean returnResult;
+ Buckets groupBuckets;
+
+ if (null != group) {
+ containerName = group.getContainerName();
+
+ if (null == containerName) {
+ containerName = GlobalConstants.DEFAULT.toString();
+ }
+ else if (!FRMUtil.isNameValid(containerName)) {
+ logger.error("Container Name is invalid %s" + containerName);
+ return new Status(StatusCode.BADREQUEST, "Container Name is invalid");
+ }
+
+ groupName = group.getGroupName();
+ if (!FRMUtil.isNameValid(groupName)) {
+ logger.error("Group Name is invalid %s" + groupName);
+ return new Status(StatusCode.BADREQUEST, "Group Name is invalid");
+ }
+
+ returnResult = doesGroupEntryExists(group.getKey(), groupName, containerName);
+
+ if (FRMUtil.operation.ADD == operation && returnResult) {
+ logger.error("Record with same Group Name exists");
+ return new Status(StatusCode.BADREQUEST, "Group record exists");
+ }
+ else if (!returnResult) {
+ logger.error("Group record does not exist");
+ return new Status(StatusCode.BADREQUEST, "Group record does not exist");
+ }
+
+ if (!(group.getGroupType().getIntValue() >= GroupType.GroupAll.getIntValue() &&
+ group.getGroupType().getIntValue() <= GroupType.GroupFf.getIntValue())) {
+ logger.error("Invalid Group type %d" + group.getGroupType().getIntValue());
+ return new Status(StatusCode.BADREQUEST, "Invalid Group type");
+ }
+
+ groupBuckets = group.getBuckets();
+
+ if (null != groupBuckets && null != groupBuckets.getBucket()) {
+ bucketIterator = groupBuckets.getBucket().iterator();
+
+ while (bucketIterator.hasNext()) {
+ if(!(FRMUtil.areActionsValid(bucketIterator.next().getActions()))) {
+ logger.error("Error in action bucket");
+ return new Status(StatusCode.BADREQUEST, "Invalid Group bucket contents");
+ }
+ }
+ }
+ }
+
+ return new Status(StatusCode.SUCCESS);
+
+ }
+
+ private boolean doesGroupEntryExists(GroupKey key, String groupName, String containerName) {
+ if (! originalSwGroupView.containsKey(key)) {
+ return false;
+ }
+
+ for (ConcurrentMap.Entry<GroupKey, Group> entry : originalSwGroupView.entrySet()) {
+ if (entry.getValue().getGroupName().equals(groupName)) {
+ if (entry.getValue().getContainerName().equals(containerName)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+
+    /**
+     * Updates a Group entry in our internal database and, when the group is
+     * marked for installation, pushes the update to the southbound plugin.
+     *
+     * @param path                  identifier path of the group being updated
+     * @param groupUpdateDataObject the updated group configuration
+     * @return status of the validation/update operation
+     */
+    private Status updateGroup(InstanceIdentifier<?> path, Group groupUpdateDataObject) {
+        GroupKey groupKey = groupUpdateDataObject.getKey();
+        Status groupOperationStatus = validateGroup(groupUpdateDataObject, FRMUtil.operation.UPDATE);
+
+        if (!groupOperationStatus.isSuccess()) {
+            // Use SLF4J parameterized logging; the previous "%s" + name
+            // concatenation logged a literal "%s" before the group name.
+            logger.error("Group data object validation failed {}", groupUpdateDataObject.getGroupName());
+            return groupOperationStatus;
+        }
+
+        // put() replaces any existing mapping, so no prior remove() is needed.
+        originalSwGroupView.put(groupKey, groupUpdateDataObject);
+
+        if (groupUpdateDataObject.isInstall()) {
+            UpdateGroupInputBuilder groupData = new UpdateGroupInputBuilder();
+            //TODO how to get original group and modified group.
+
+            installedSwGroupView.put(groupKey, groupUpdateDataObject);
+            groupService.updateGroup(groupData.build());
+        }
+
+        return groupOperationStatus;
+    }
+
+    /**
+     * Adds a Group to our internal database and, when the group is marked for
+     * installation, to the southbound plugin as well.
+     *
+     * @param path               identifier path of the group being added
+     * @param groupAddDataObject the group configuration to add
+     * @return status of the validation/add operation
+     */
+    private Status addGroup(InstanceIdentifier<?> path, Group groupAddDataObject) {
+        GroupKey groupKey = groupAddDataObject.getKey();
+        Status groupOperationStatus = validateGroup(groupAddDataObject, FRMUtil.operation.ADD);
+
+        if (!groupOperationStatus.isSuccess()) {
+            // Use SLF4J parameterized logging; the previous "%s" + name
+            // concatenation logged a literal "%s" before the group name.
+            logger.error("Group data object validation failed {}", groupAddDataObject.getGroupName());
+            return groupOperationStatus;
+        }
+        // The duplicate validateGroup() call whose result was discarded has
+        // been removed; validation already happened above.
+        originalSwGroupView.put(groupKey, groupAddDataObject);
+
+        if (groupAddDataObject.isInstall()) {
+            AddGroupInputBuilder groupData = new AddGroupInputBuilder();
+            groupData.setBuckets(groupAddDataObject.getBuckets());
+            groupData.setContainerName(groupAddDataObject.getContainerName());
+            groupData.setGroupId(groupAddDataObject.getGroupId());
+            groupData.setGroupType(groupAddDataObject.getGroupType());
+            groupData.setNode(groupAddDataObject.getNode());
+            installedSwGroupView.put(groupKey, groupAddDataObject);
+            groupService.addGroup(groupData.build());
+        }
+
+        return groupOperationStatus;
+    }
+
+    /**
+     * Pushes a prepared transaction (additions, updates, removals) to the
+     * southbound plugin, failing fast on the first unsuccessful operation.
+     *
+     * @param transaction prepared plan produced by internalTransaction
+     * @return successful RpcResult only when every operation succeeded
+     */
+    private RpcResult<Void> commitToPlugin(internalTransaction transaction) {
+        for (Entry<InstanceIdentifier<?>, Group> entry : transaction.additions.entrySet()) {
+            if (!addGroup(entry.getKey(), entry.getValue()).isSuccess()) {
+                return Rpcs.getRpcResult(false, null, null);
+            }
+        }
+        // BUG FIX: iterate the planned updates here. The original looped over
+        // transaction.additions a second time (note the leftover
+        // @SuppressWarnings("unused")) so updates were never pushed.
+        for (Entry<InstanceIdentifier<?>, Group> entry : transaction.updates.entrySet()) {
+            if (!updateGroup(entry.getKey(), entry.getValue()).isSuccess()) {
+                return Rpcs.getRpcResult(false, null, null);
+            }
+        }
+
+        for (InstanceIdentifier<?> removal : transaction.removals) {
+            // removeFlow(removal);
+        }
+
+        return Rpcs.getRpcResult(true, null, null);
+    }
+
+    /**
+     * Commit handler invoked by the data broker; turns a data modification
+     * into an internalTransaction with a prepared add/update/remove plan.
+     */
+    private final class GroupDataCommitHandler implements DataCommitHandler<InstanceIdentifier<?>, DataObject> {
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public DataCommitTransaction requestCommit(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+            // We should verify transaction
+            // Replaced System.out.println debug output with proper logging.
+            logger.debug("Group commit requested: {}", modification);
+            internalTransaction transaction = new internalTransaction(modification);
+            transaction.prepareUpdate();
+            return transaction;
+        }
+    }
+
+    /**
+     * Two-phase commit transaction over group configuration data.
+     * prepareUpdate() classifies each modified entry as an addition or an
+     * update against our internal view; finish() pushes the plan to the
+     * plugin via commitToPlugin().
+     */
+    private final class internalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
+
+        // Modification this transaction was created for (immutable).
+        private final DataModification<InstanceIdentifier<?>, DataObject> modification;
+
+        @Override
+        public DataModification<InstanceIdentifier<?>, DataObject> getModification() {
+            return modification;
+        }
+
+        public internalTransaction(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+            this.modification = modification;
+        }
+
+        // Plan buckets filled by prepareUpdate().
+        Map<InstanceIdentifier<?>, Group> additions = new HashMap<>();
+        Map<InstanceIdentifier<?>, Group> updates = new HashMap<>();
+        Set<InstanceIdentifier<?>> removals = new HashSet<>();
+
+        /**
+         * We create a plan which flows will be added, which will be updated and
+         * which will be removed based on our internal state.
+         *
+         */
+        void prepareUpdate() {
+
+            // Only Group entries are relevant; other data objects are ignored.
+            Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
+            for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
+                if (entry.getValue() instanceof Group) {
+                    Group group = (Group) entry.getValue();
+                    preparePutEntry(entry.getKey(), group);
+                }
+
+            }
+
+            removals = modification.getRemovedConfigurationData();
+        }
+
+        // Classifies a put as update (already known) or addition (new to us).
+        private void preparePutEntry(InstanceIdentifier<?> key, Group group) {
+
+            Group original = originalSwGroupView.get(key);
+            if (original != null) {
+                // It is update for us
+
+                updates.put(key, group);
+            } else {
+                // It is addition for us
+
+                additions.put(key, group);
+            }
+        }
+
+        /**
+         * We are OK to go with execution of plan
+         *
+         */
+        @Override
+        public RpcResult<Void> finish() throws IllegalStateException {
+
+            RpcResult<Void> rpcStatus = commitToPlugin(this);
+            // We return true if internal transaction is successful.
+            // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+            return rpcStatus;
+        }
+
+        /**
+         *
+         * We should rollback our preparation
+         *
+         */
+        @Override
+        public RpcResult<Void> rollback() throws IllegalStateException {
+            // NOOP - we did not modified any internal state during
+            // requestCommit phase
+            // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+            return Rpcs.getRpcResult(true, null, null);
+
+        }
+
+    }
+
+
+    /**
+     * Listener for southbound group notifications; records received events
+     * in per-type lists for later inspection.
+     */
+    final class GroupEventListener implements SalGroupListener {
+
+        List<GroupAdded> addedGroups = new ArrayList<>();
+        List<GroupRemoved> removedGroups = new ArrayList<>();
+        List<GroupUpdated> updatedGroups = new ArrayList<>();
+
+        @Override
+        public void onGroupAdded(GroupAdded notification) {
+            // Replaced System.out.println debug output with proper logging.
+            logger.debug("Group added notification received: {}", notification);
+            addedGroups.add(notification);
+        }
+
+        @Override
+        public void onGroupRemoved(GroupRemoved notification) {
+            // TODO Auto-generated method stub
+
+        }
+
+        @Override
+        public void onGroupUpdated(GroupUpdated notification) {
+            // TODO Auto-generated method stub
+
+        }
+    }
}
type string;
}
+ leaf barrier {
+ type boolean;
+ }
+
container buckets {
list bucket {
key "order";
type meter-id;
}
+ leaf install {
+ type boolean;
+ }
+ leaf meter-name {
+ type string;
+ }
+
+ leaf container-name {
+ type string;
+ }
+
container meter-band-headers {
list meter-band-header {
key "order";
}
typedef port-state {
- type enumeration {
- enum link-down;
- enum blocked;
- enum live;
+ type enumeration {
+ enum link-down;
+ enum blocked;
+ enum live;
}
}
uses common-port;
leaf mask {
- type uint32;
+ type port-config;
description "Bitmap of OFPPC-* flags to be changed";
}
leaf port-name {
type string;
- }
+ }
+
+ leaf barrier {
+ type boolean;
+ }
}
}
}
}
grouping meter-entry {
-
leaf node {
type inv:node-ref;
}
-
uses meter:meter;
}
--- /dev/null
+module flow-capable-transaction {
+    namespace "urn:opendaylight:flow:transaction";
+    prefix type;
+
+    import opendaylight-inventory {prefix inv; revision-date "2013-08-19";}
+    import yang-ext {prefix ext; revision-date "2013-07-09";}
+
+    revision "2013-11-03" {
+        description "Initial revision";
+    }
+
+    // Opaque identifier correlating a request with its completion.
+    typedef transaction-id {
+        type uint64;
+    }
+
+    // Mixed into RPC inputs/outputs that participate in a transaction.
+    grouping transaction-aware {
+        leaf transaction-id {
+            type transaction-id;
+        }
+    }
+
+    rpc get-next-transaction-id {
+        input {
+            leaf node {
+                ext:context-reference "inv:node-context";
+                type inv:node-ref;
+            }
+        }
+        output {
+            uses transaction-aware;
+        }
+    }
+
+    // Barrier request: completes the identified transaction on the node.
+    rpc finish-transaction {
+        input {
+            leaf node {
+                ext:context-reference "inv:node-context";
+                type inv:node-ref;
+            }
+            leaf transaction-id {
+                type transaction-id;
+            }
+        }
+    }
+}
\ No newline at end of file
import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
import ietf-inet-types {prefix inet;revision-date 2010-09-24;}
import opendaylight-group-types {prefix group-type;revision-date 2013-10-18;}
+ import flow-capable-transaction {prefix tr;}
revision "2013-09-18" {
description "Initial revision of group service";
}
grouping node-group {
- uses "inv:node-context-ref";
-
+ uses "inv:node-context-ref";
uses group-type:group;
}
rpc add-group {
input {
uses node-group;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
rpc remove-group {
input {
uses group-update;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
rpc update-group {
input {
uses group-update;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
import yang-ext {prefix ext; revision-date "2013-07-09";}
import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
import opendaylight-meter-types {prefix meter-type;revision-date "2013-09-18";}
+ import flow-capable-transaction {prefix tr;}
revision "2013-09-18" {
description "Initial revision of meter service";
rpc add-meter {
input {
uses node-meter;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
rpc remove-meter {
input {
uses node-meter;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
rpc update-meter {
input {
uses meter-update;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
--- /dev/null
+module sal-port {
+    namespace "urn:opendaylight:port:service";
+    prefix port;
+
+    import yang-ext {prefix ext; revision-date "2013-07-09";}
+    import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+    import opendaylight-port-types {prefix port-type;revision-date "2013-09-25";}
+
+    revision "2013-11-07" {
+        description "Initial revision of port service";
+    }
+
+    // Node reference plus the port-mod payload for that node.
+    grouping node-port {
+        uses "inv:node-context-ref";
+
+        uses port-type:ofp-port-mod;
+    }
+
+    /** Base configuration structure **/
+    grouping port-update {
+        uses "inv:node-context-ref";
+
+        container original-port {
+            uses port-type:ofp-port-mod;
+        }
+        container updated-port {
+            uses port-type:ofp-port-mod;
+        }
+    }
+
+    rpc update-port {
+        input {
+            uses port-update;
+        }
+    }
+
+    rpc get-port {
+        output {
+            uses port-type:flow-capable-port;
+        }
+    }
+
+    // Emitted when a port disappears from a node.
+    notification port-removed {
+        uses node-port;
+    }
+}
\ No newline at end of file
import yang-ext {prefix ext; revision-date "2013-07-09";}
import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
import opendaylight-table-types {prefix table-type;revision-date "2013-10-26";}
+ import flow-capable-transaction {prefix tr;}
revision "2013-10-26" {
description "Initial revision of table service";
- }
+ }
/** Base configuration structure **/
grouping table-update {
+ uses "inv:node-context-ref";
container original-table {
uses table-type:table-features;
}
rpc update-table {
input {
- leaf node {
- ext:context-reference "inv:node-context";
- type inv:node-ref;
- }
uses table-update;
+ uses tr:transaction-aware;
+ }
+ output {
+ uses tr:transaction-aware;
}
}
}
\ No newline at end of file
<artifactId>netty-handler</artifactId>
<version>${netconf.netty.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.thirdparty</groupId>
+ <artifactId>ganymed</artifactId>
+ </dependency>
</dependencies>
<build>
org.opendaylight.controller.config.stat,
com.google.common.base,
com.google.common.collect,
+ ch.ethz.ssh2,
io.netty.buffer,
io.netty.channel,
io.netty.channel.socket,
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh;
+
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelFutureListener;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandlerAdapter;
+import io.netty.channel.ChannelPromise;
+import java.io.IOException;
+import java.net.SocketAddress;
+import org.opendaylight.controller.netconf.util.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.handler.ssh.client.Invoker;
+import org.opendaylight.controller.netconf.util.handler.ssh.client.SshClient;
+import org.opendaylight.controller.netconf.util.handler.ssh.client.SshClientAdapter;
+import org.opendaylight.controller.netconf.util.handler.ssh.virtualsocket.VirtualSocket;
+
+/**
+ * Netty SSH handler class. Acts as the bridge between Netty and the SSH library: all standard Netty
+ * message handling stops at an instance of this class and downstream events are handed off to the
+ * wrapped {@link org.opendaylight.controller.netconf.util.handler.ssh.client.SshClientAdapter}.
+ */
+public class SshHandler extends ChannelOutboundHandlerAdapter {
+    private final VirtualSocket virtualSocket = new VirtualSocket();
+    private final SshClientAdapter sshClientAdapter;
+
+    public SshHandler(AuthenticationHandler authenticationHandler, Invoker invoker) throws IOException {
+        SshClient sshClient = new SshClient(virtualSocket, authenticationHandler);
+        this.sshClientAdapter = new SshClientAdapter(sshClient, invoker);
+    }
+
+    @Override
+    public void handlerAdded(ChannelHandlerContext ctx) {
+        // Install the virtual socket at the head of the pipeline exactly once.
+        if (ctx.channel().pipeline().get("socket") == null) {
+            ctx.channel().pipeline().addFirst("socket", virtualSocket);
+        }
+    }
+
+    @Override
+    public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
+        if (ctx.channel().pipeline().get("socket") != null) {
+            ctx.channel().pipeline().remove("socket");
+        }
+    }
+
+    @Override
+    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
+        this.sshClientAdapter.write((String) msg);
+    }
+
+    @Override
+    public void connect(final ChannelHandlerContext ctx,
+                        SocketAddress remoteAddress,
+                        SocketAddress localAddress,
+                        ChannelPromise promise) throws Exception {
+        ctx.connect(remoteAddress, localAddress, promise);
+
+        promise.addListener(new ChannelFutureListener() {
+            @Override
+            public void operationComplete(ChannelFuture channelFuture) throws Exception {
+                // BUG FIX: only start the SSH client thread when the connect
+                // attempt actually succeeded; previously it started
+                // unconditionally even on a failed connect.
+                if (channelFuture.isSuccess()) {
+                    sshClientAdapter.start(ctx);
+                }
+            }
+        });
+    }
+
+    @Override
+    public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
+        sshClientAdapter.stop(promise);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.authentication;
+
+import ch.ethz.ssh2.Connection;
+
+import java.io.IOException;
+
+/**
+ * Class providing authentication facility to SSH handler.
+ */
+public abstract class AuthenticationHandler {
+    /**
+     * Authenticates the given SSH connection.
+     *
+     * @param connection established (but unauthenticated) SSH connection
+     * @throws IOException when authentication fails or the connection breaks
+     */
+    public abstract void authenticate(Connection connection) throws IOException;
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.authentication;
+
+import ch.ethz.ssh2.Connection;
+
+import java.io.IOException;
+
+/**
+ * Class providing the username/password authentication option to
+ * {@link org.opendaylight.controller.netconf.util.handler.ssh.SshHandler}.
+ */
+public class LoginPassword extends AuthenticationHandler {
+    private final String username;
+    private final String password;
+
+    public LoginPassword(String username, String password) {
+        this.username = username;
+        this.password = password;
+    }
+
+    @Override
+    public void authenticate(Connection connection) throws IOException {
+        boolean isAuthenticated = connection.authenticateWithPassword(username, password);
+
+        // Idiomatic negation and braces instead of "== false" one-liner.
+        if (!isAuthenticated) {
+            throw new IOException("Authentication failed.");
+        }
+    }
+}
--- /dev/null
+package org.opendaylight.controller.netconf.util.handler.ssh.client;
+
+import java.io.IOException;
+
+/**
+ * Abstract class providing mechanism of invoking various SSH level services.
+ * Class is not allowed to be extended, as it provides its own implementations via instance initiators.
+ */
+public abstract class Invoker {
+    // Set once the invoker has been used; each invoker is single-use.
+    private boolean invoked = false;
+
+    private Invoker() {
+    }
+
+    protected boolean isInvoked() {
+        return invoked;
+    }
+
+    abstract void invoke(SshSession session) throws IOException;
+
+    /**
+     * Invoker implementation that starts the named subsystem SSH service.
+     *
+     * @param subsystem name of the SSH subsystem to start
+     * @return single-use invoker for the subsystem
+     */
+    public static Invoker subsystem(final String subsystem) {
+        return new Invoker() {
+            @Override
+            void invoke(SshSession session) throws IOException {
+                if (isInvoked()) {
+                    throw new IllegalStateException("Already invoked.");
+                }
+                session.startSubSystem(subsystem);
+                // BUG FIX: record the invocation — the flag was never set, so
+                // the "Already invoked" guard above could never trigger.
+                invoked = true;
+            }
+        };
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.client;
+
+import ch.ethz.ssh2.Connection;
+import ch.ethz.ssh2.Session;
+import ch.ethz.ssh2.channel.Channel;
+import org.opendaylight.controller.netconf.util.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.handler.ssh.virtualsocket.VirtualSocket;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+
+/**
+ * Wrapper class around GANYMED SSH java library.
+ */
+public class SshClient {
+    private final VirtualSocket socket;
+    // Sessions keyed by creation index. NOTE(review): keys derive from
+    // openSessions.size(); safe while entries are never removed one by one.
+    private final Map<Integer, SshSession> openSessions = new HashMap<>();
+    private final AuthenticationHandler authenticationHandler;
+    private Connection connection;
+
+    public SshClient(VirtualSocket socket,
+                     AuthenticationHandler authenticationHandler) throws IOException {
+        this.socket = socket;
+        this.authenticationHandler = authenticationHandler;
+    }
+
+    /**
+     * Opens a new SSH session, lazily establishing and authenticating the
+     * underlying connection on first use.
+     */
+    public SshSession openSession() throws IOException {
+        if (connection == null) {
+            connect();
+        }
+
+        Session session = connection.openSession();
+        SshSession sshSession = new SshSession(session);
+        openSessions.put(openSessions.size(), sshSession);
+
+        return sshSession;
+    }
+
+    private void connect() throws IOException {
+        connection = new Connection(socket);
+        connection.connect();
+        authenticationHandler.authenticate(connection);
+    }
+
+    /** Closes a session only while it is open or still opening. */
+    public void closeSession(SshSession session) {
+        if (session.getState() == Channel.STATE_OPEN
+                || session.getState() == Channel.STATE_OPENING) {
+            session.session.close();
+        }
+    }
+
+    /** Closes all tracked sessions and the underlying connection. */
+    public void close() {
+        for (SshSession session : openSessions.values()) {
+            closeSession(session);
+        }
+
+        openSessions.clear();
+
+        if (connection != null) {
+            connection.close();
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.client;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.opendaylight.controller.netconf.util.handler.ssh.virtualsocket.VirtualSocketException;
+
+/**
+ * Worker thread class. Handles all downstream and upstream events in SSH Netty pipeline.
+ */
+public class SshClientAdapter implements Runnable {
+    private final SshClient sshClient;
+    private final Invoker invoker;
+
+    private SshSession session;
+    private InputStream stdOut;
+    private InputStream stdErr;
+    private OutputStream stdIn;
+
+    private ChannelHandlerContext ctx;
+    private ChannelPromise disconnectPromise;
+
+    private final AtomicBoolean stopRequested = new AtomicBoolean(false);
+
+    private final Object lock = new Object();
+
+    public SshClientAdapter(SshClient sshClient,
+                            Invoker invoker) {
+        this.sshClient = sshClient;
+        this.invoker = invoker;
+    }
+
+    public void run() {
+        try {
+            session = sshClient.openSession();
+            invoker.invoke(session);
+
+            stdOut = session.getStdout();
+            stdErr = session.getStderr();
+
+            synchronized (lock) {
+                stdIn = session.getStdin();
+            }
+
+            while (!stopRequested.get()) {
+                byte[] readBuff = new byte[1024];
+                int c = stdOut.read(readBuff);
+
+                // BUG FIX: read() returns -1 on end-of-stream; the original
+                // then did "new byte[c]" and threw NegativeArraySizeException.
+                if (c < 0) {
+                    break;
+                }
+
+                byte[] tranBuff = new byte[c];
+                System.arraycopy(readBuff, 0, tranBuff, 0, c);
+
+                ByteBuf byteBuf = Unpooled.buffer(c);
+                byteBuf.writeBytes(tranBuff);
+                ctx.fireChannelRead(byteBuf);
+            }
+
+        } catch (VirtualSocketException e) {
+            // Netty closed connection prematurely.
+            // Just pass and move on.
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        } finally {
+            sshClient.close();
+
+            synchronized (lock) {
+                if (disconnectPromise != null) {
+                    ctx.disconnect(disconnectPromise);
+                }
+            }
+        }
+    }
+
+    // TODO: needs rework to match netconf framer API.
+    public void write(String message) throws IOException {
+        synchronized (lock) {
+            if (stdIn == null) {
+                throw new IllegalStateException("StdIn not available");
+            }
+        }
+        // NOTE(review): the write itself happens outside the lock; confirm
+        // concurrent writers cannot interleave here.
+        stdIn.write(message.getBytes());
+        stdIn.flush();
+    }
+
+    public void stop(ChannelPromise promise) {
+        synchronized (lock) {
+            stopRequested.set(true);
+            disconnectPromise = promise;
+        }
+    }
+
+    public void start(ChannelHandlerContext ctx) {
+        if (this.ctx != null) {
+            return; // context is already associated.
+        }
+
+        this.ctx = ctx;
+        new Thread(this).start();
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.client;
+
+import ch.ethz.ssh2.Session;
+import ch.ethz.ssh2.StreamGobbler;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Wrapper class for proprietary SSH sessions implementations
+ */
+public class SshSession {
+    // Underlying Ganymed session; package-visible so SshClient can close it.
+    final Session session;
+
+    public SshSession(Session session) {
+        this.session = session;
+    }
+
+    public void execCommand(String cmd) throws IOException {
+        session.execCommand(cmd);
+    }
+
+    public void execCommand(String cmd, String charsetName) throws IOException {
+        session.execCommand(cmd, charsetName);
+    }
+
+    public void startShell() throws IOException {
+        session.startShell();
+    }
+
+    public void startSubSystem(String name) throws IOException {
+        session.startSubSystem(name);
+    }
+
+    public int getState() {
+        return session.getState();
+    }
+
+    // stdout is wrapped in a StreamGobbler to keep draining the channel.
+    // NOTE(review): stderr below is NOT gobbled — confirm that asymmetry is
+    // intentional; an undrained stderr can stall the SSH channel.
+    public InputStream getStdout() {
+        return new StreamGobbler(session.getStdout());
+    }
+
+    public InputStream getStderr() {
+        return session.getStderr();
+    }
+
+    public OutputStream getStdin() {
+        return session.getStdin();
+    }
+
+    public int waitUntilDataAvailable(long timeout) throws IOException {
+        return session.waitUntilDataAvailable(timeout);
+    }
+
+    public int waitForCondition(int condition_set, long timeout) {
+        return session.waitForCondition(condition_set, timeout);
+    }
+
+    public Integer getExitStatus() {
+        return session.getExitStatus();
+    }
+
+    public String getExitSignal() {
+        return session.getExitSignal();
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.virtualsocket;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandler;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Class provides {@link InputStream} functionality to users of virtual socket.
+ */
+public class ChannelInputStream extends InputStream implements ChannelInboundHandler {
+    private final Object lock = new Object();
+    private final ByteBuf bb = Unpooled.buffer();
+
+    @Override
+    public int read(byte b[], int off, int len) throws IOException {
+        if (b == null) {
+            throw new NullPointerException();
+        } else if (off < 0 || len < 0 || len > b.length - off) {
+            throw new IndexOutOfBoundsException();
+        } else if (len == 0) {
+            return 0;
+        }
+
+        int bytesRead = 1;
+        synchronized (lock) {
+            // Block for the first byte, then drain whatever else is buffered.
+            int c = read();
+
+            b[off] = (byte) c;
+
+            if (this.bb.readableBytes() == 0) {
+                return bytesRead;
+            }
+
+            int ltr = len - 1;
+            ltr = (ltr <= bb.readableBytes()) ? ltr : bb.readableBytes();
+
+            // BUG FIX: the remainder must be copied starting at off + 1; the
+            // original used absolute index 1, corrupting reads with off > 0.
+            bb.readBytes(b, off + 1, ltr);
+            bytesRead += ltr;
+        }
+        return bytesRead;
+    }
+
+    @Override
+    public int read() throws IOException {
+        synchronized (lock) {
+            // Wait until channelRead() delivers data and notifies us.
+            while (this.bb.readableBytes() == 0) {
+                try {
+                    lock.wait();
+                } catch (InterruptedException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+            return this.bb.readByte() & 0xFF;
+        }
+    }
+
+    @Override
+    public int available() throws IOException {
+        synchronized (lock) {
+            return this.bb.readableBytes();
+        }
+    }
+
+    public void channelRegistered(ChannelHandlerContext ctx)
+            throws Exception {
+        ctx.fireChannelRegistered();
+    }
+
+    public void channelUnregistered(ChannelHandlerContext ctx)
+            throws Exception {
+        ctx.fireChannelUnregistered();
+    }
+
+    public void channelActive(ChannelHandlerContext ctx)
+            throws Exception {
+        ctx.fireChannelActive();
+    }
+
+    public void channelInactive(ChannelHandlerContext ctx)
+            throws Exception {
+        ctx.fireChannelInactive();
+    }
+
+    // Buffers inbound bytes and wakes any blocked reader.
+    public void channelRead(ChannelHandlerContext ctx, Object o)
+            throws Exception {
+        synchronized (lock) {
+            this.bb.discardReadBytes();
+            this.bb.writeBytes((ByteBuf) o);
+            lock.notifyAll();
+        }
+    }
+
+    public void channelReadComplete(ChannelHandlerContext ctx)
+            throws Exception {
+        ctx.fireChannelReadComplete();
+    }
+
+    public void userEventTriggered(ChannelHandlerContext ctx, Object o)
+            throws Exception {
+        ctx.fireUserEventTriggered(o);
+    }
+
+    public void channelWritabilityChanged(ChannelHandlerContext ctx)
+            throws Exception {
+        ctx.fireChannelWritabilityChanged();
+    }
+
+    public void handlerAdded(ChannelHandlerContext ctx)
+            throws Exception {
+    }
+
+    public void handlerRemoved(ChannelHandlerContext ctx)
+            throws Exception {
+    }
+
+    public void exceptionCaught(ChannelHandlerContext ctx, Throwable throwable)
+            throws Exception {
+        ctx.fireExceptionCaught(throwable);
+    }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.virtualsocket;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandler;
+import io.netty.channel.ChannelPromise;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.SocketAddress;
+
+/**
+ * Class provides {@link OutputStream} functionality to users of virtual socket.
+ */
+public class ChannelOutputStream extends OutputStream implements ChannelOutboundHandler {
+    private final Object lock = new Object();
+    private ByteBuf buff = Unpooled.buffer();
+    private ChannelHandlerContext ctx;
+
+    // Flushes the accumulated buffer to the channel and starts a fresh one.
+    // NOTE(review): blocks via awaitUninterruptibly — must not run on the
+    // Netty event loop; confirm callers are worker threads only.
+    @Override
+    public void flush() throws IOException {
+        synchronized(lock) {
+            ctx.writeAndFlush(buff).awaitUninterruptibly();
+            buff = Unpooled.buffer();
+        }
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+        synchronized(lock) {
+            buff.writeByte(b);
+        }
+    }
+
+    public void bind(ChannelHandlerContext ctx, SocketAddress localAddress,
+                     ChannelPromise promise) throws Exception {
+        ctx.bind(localAddress, promise);
+    }
+
+    // Captures the handler context so flush() can reach the channel.
+    public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress,
+                        SocketAddress localAddress, ChannelPromise promise)
+            throws Exception {
+        this.ctx = ctx;
+        ctx.connect(remoteAddress, localAddress, promise);
+    }
+
+    public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise)
+            throws Exception {
+        ctx.disconnect(promise);
+    }
+
+    public void close(ChannelHandlerContext ctx, ChannelPromise promise)
+            throws Exception {
+        ctx.close(promise);
+    }
+
+    public void deregister(ChannelHandlerContext ctx, ChannelPromise channelPromise)
+            throws Exception {
+        ctx.deregister(channelPromise);
+    }
+
+    public void read(ChannelHandlerContext ctx)
+            throws Exception {
+        ctx.read();
+    }
+
+    // Outbound pipeline writes are intentionally swallowed: all output goes
+    // through the OutputStream API above instead.
+    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
+            throws Exception {
+        // pass
+    }
+
+    public void flush(ChannelHandlerContext ctx)
+            throws Exception {
+        // pass
+    }
+
+    public void handlerAdded(ChannelHandlerContext ctx)
+            throws Exception {
+    }
+
+    public void handlerRemoved(ChannelHandlerContext ctx)
+            throws Exception {
+    }
+
+    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
+            throws Exception {
+        ctx.fireExceptionCaught(cause);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.virtualsocket;
+
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketAddress;
+import java.net.SocketException;
+import java.nio.channels.SocketChannel;
+
+/**
+ * Handler class providing Socket functionality to OIO client application. By using VirtualSocket user can
+ * use OIO application in asynchronous environment and NIO EventLoop. Using VirtualSocket OIO applications
+ * are able to use full potential of NIO environment.
+ */
+public class VirtualSocket extends Socket implements ChannelHandler {
+    private final ChannelInputStream chis = new ChannelInputStream();
+    private final ChannelOutputStream chos = new ChannelOutputStream();
+    private ChannelHandlerContext ctx;
+
+
+    public InputStream getInputStream() {
+        return this.chis;
+    }
+
+    public OutputStream getOutputStream() {
+        return this.chos;
+    }
+
+    // Installs the stream handlers at the head of the pipeline exactly once.
+    public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
+        this.ctx = ctx;
+
+        if (ctx.channel().pipeline().get("outputStream") == null) {
+            ctx.channel().pipeline().addFirst("outputStream", chos);
+        }
+
+        if (ctx.channel().pipeline().get("inputStream") == null) {
+            ctx.channel().pipeline().addFirst("inputStream", chis);
+        }
+    }
+
+    public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
+        if (ctx.channel().pipeline().get("outputStream") != null) {
+            ctx.channel().pipeline().remove("outputStream");
+        }
+
+        if (ctx.channel().pipeline().get("inputStream") != null) {
+            ctx.channel().pipeline().remove("inputStream");
+        }
+    }
+
+    public void exceptionCaught(ChannelHandlerContext ctx, Throwable throwable) throws Exception {
+        ctx.fireExceptionCaught(throwable);
+    }
+
+    public VirtualSocket() {super();}
+
+    // The remaining Socket methods are deliberate no-op / stub overrides:
+    // this "socket" is backed by the Netty channel, not an OS socket.
+    @Override
+    public void connect(SocketAddress endpoint) throws IOException {}
+
+    @Override
+    public void connect(SocketAddress endpoint, int timeout) throws IOException {}
+
+    @Override
+    public void bind(SocketAddress bindpoint) throws IOException {}
+
+    @Override
+    public InetAddress getInetAddress() {
+        InetSocketAddress isa = getInetSocketAddress();
+
+        if (isa == null) throw new VirtualSocketException();
+
+        return getInetSocketAddress().getAddress();
+    }
+
+    @Override
+    public InetAddress getLocalAddress() {return null;}
+
+    @Override
+    public int getPort() {
+        return getInetSocketAddress().getPort();
+    }
+
+    private InetSocketAddress getInetSocketAddress() {
+        return (InetSocketAddress)getRemoteSocketAddress();
+    }
+
+    @Override
+    public int getLocalPort() {return -1;}
+
+    @Override
+    public SocketAddress getRemoteSocketAddress() {
+        return this.ctx.channel().remoteAddress();
+    }
+
+    @Override
+    public SocketAddress getLocalSocketAddress() {
+        return this.ctx.channel().localAddress();
+    }
+
+    @Override
+    public SocketChannel getChannel() {return null;}
+
+    @Override
+    public void setTcpNoDelay(boolean on) throws SocketException {}
+
+    @Override
+    public boolean getTcpNoDelay() throws SocketException {return false;}
+
+    @Override
+    public void setSoLinger(boolean on, int linger) throws SocketException {}
+
+    @Override
+    public int getSoLinger() throws SocketException {return -1;}
+
+    @Override
+    public void sendUrgentData(int data) throws IOException {}
+
+    @Override
+    public void setOOBInline(boolean on) throws SocketException {}
+
+    @Override
+    public boolean getOOBInline() throws SocketException {return false;}
+
+    @Override
+    public synchronized void setSoTimeout(int timeout) throws SocketException {}
+
+    @Override
+    public synchronized int getSoTimeout() throws SocketException {return -1;}
+
+    @Override
+    public synchronized void setSendBufferSize(int size) throws SocketException {}
+
+    @Override
+    public synchronized int getSendBufferSize() throws SocketException {return -1;}
+
+    @Override
+    public synchronized void setReceiveBufferSize(int size) throws SocketException {}
+
+    @Override
+    public synchronized int getReceiveBufferSize() throws SocketException {return -1;}
+
+    @Override
+    public void setKeepAlive(boolean on) throws SocketException {}
+
+    @Override
+    public boolean getKeepAlive() throws SocketException {return false;}
+
+    @Override
+    public void setTrafficClass(int tc) throws SocketException {}
+
+    @Override
+    public int getTrafficClass() throws SocketException {return -1;}
+
+    @Override
+    public void setReuseAddress(boolean on) throws SocketException {}
+
+    @Override
+    public boolean getReuseAddress() throws SocketException {return false;}
+
+    @Override
+    public synchronized void close() throws IOException {}
+
+    @Override
+    public void shutdownInput() throws IOException {}
+
+    @Override
+    public void shutdownOutput() throws IOException {}
+
+    @Override
+    public String toString() {
+        return "Virtual socket InetAdress["+getInetAddress()+"], Port["+getPort()+"]";
+    }
+
+    @Override
+    public boolean isConnected() {return false;}
+
+    @Override
+    public boolean isBound() {return false;}
+
+    @Override
+    public boolean isClosed() {return false;}
+
+    @Override
+    public boolean isInputShutdown() {return false;}
+
+    @Override
+    public boolean isOutputShutdown() {return false;}
+
+    @Override
+    public void setPerformancePreferences(int connectionTime, int latency, int bandwidth) {}
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.handler.ssh.virtualsocket;
+
+/**
+ * Exception class which provides notification about exceptional situations at the virtual socket layer.
+ */
+// NOTE(review): RuntimeException is Serializable — consider declaring a
+// serialVersionUID to silence the serialization lint warning.
+public class VirtualSocketException extends RuntimeException {
+}
<module>config-persister-impl</module>
<module>netconf-mapping-api</module>
<module>netconf-client</module>
+ <module>../../third-party/ganymed</module>
</modules>
<profiles>
}\r
\r
public boolean isEnableDHCP() {\r
- if (enableDHCP == null)\r
+ if (enableDHCP == null) {\r
return true;\r
+ }\r
return enableDHCP;\r
}\r
\r
public Boolean getEnableDHCP() { return enableDHCP; }\r
\r
public void setEnableDHCP(Boolean newValue) {\r
- this.enableDHCP = newValue;\r
+ enableDHCP = newValue;\r
}\r
\r
public String getTenantID() {\r
Iterator<String> i = fields.iterator();\r
while (i.hasNext()) {\r
String s = i.next();\r
- if (s.equals("id"))\r
+ if (s.equals("id")) {\r
ans.setSubnetUUID(this.getSubnetUUID());\r
- if (s.equals("network_id"))\r
+ }\r
+ if (s.equals("network_id")) {\r
ans.setNetworkUUID(this.getNetworkUUID());\r
- if (s.equals("name"))\r
+ }\r
+ if (s.equals("name")) {\r
ans.setName(this.getName());\r
- if (s.equals("ip_version"))\r
+ }\r
+ if (s.equals("ip_version")) {\r
ans.setIpVersion(this.getIpVersion());\r
- if (s.equals("cidr"))\r
+ }\r
+ if (s.equals("cidr")) {\r
ans.setCidr(this.getCidr());\r
- if (s.equals("gateway_ip"))\r
+ }\r
+ if (s.equals("gateway_ip")) {\r
ans.setGatewayIP(this.getGatewayIP());\r
+ }\r
if (s.equals("dns_nameservers")) {\r
List<String> nsList = new ArrayList<String>();\r
nsList.addAll(this.getDnsNameservers());\r
hRoutes.addAll(this.getHostRoutes());\r
ans.setHostRoutes(hRoutes);\r
}\r
- if (s.equals("enable_dhcp"))\r
+ if (s.equals("enable_dhcp")) {\r
ans.setEnableDHCP(this.getEnableDHCP());\r
- if (s.equals("tenant_id"))\r
+ }\r
+ if (s.equals("tenant_id")) {\r
ans.setTenantID(this.getTenantID());\r
+ }\r
}\r
return ans;\r
}\r
try {\r
SubnetUtils util = new SubnetUtils(cidr);\r
SubnetInfo info = util.getInfo();\r
- if (!info.getNetworkAddress().equals(info.getAddress()))\r
+ if (!info.getNetworkAddress().equals(info.getAddress())) {\r
return false;\r
+ }\r
} catch (Exception e) {\r
return false;\r
}\r
Iterator<NeutronSubnet_IPAllocationPool> i = allocationPools.iterator();\r
while (i.hasNext()) {\r
NeutronSubnet_IPAllocationPool pool = i.next();\r
- if (pool.contains(gatewayIP))\r
+ if (pool.contains(gatewayIP)) {\r
return true;\r
+ }\r
}\r
return false;\r
}\r
\r
- public void initDefaults() {\r
- if (enableDHCP == null)\r
+ public boolean initDefaults() {\r
+ if (enableDHCP == null) {\r
enableDHCP = true;\r
- if (ipVersion == null)\r
+ }\r
+ if (ipVersion == null) {\r
ipVersion = 4;\r
+ }\r
gatewayIPAssigned = false;\r
dnsNameservers = new ArrayList<String>();\r
allocationPools = new ArrayList<NeutronSubnet_IPAllocationPool>();\r
try {\r
SubnetUtils util = new SubnetUtils(cidr);\r
SubnetInfo info = util.getInfo();\r
- if (gatewayIP == null)\r
+ if (gatewayIP == null) {\r
gatewayIP = info.getLowAddress();\r
+ }\r
if (allocationPools.size() < 1) {\r
NeutronSubnet_IPAllocationPool source =\r
new NeutronSubnet_IPAllocationPool(info.getLowAddress(),\r
allocationPools = source.splitPool(gatewayIP);\r
}\r
} catch (Exception e) {\r
+ return false;\r
}\r
+ return true;\r
}\r
\r
public List<NeutronPort> getPortsInSubnet() {\r
* available allocation pools or not\r
*/\r
public boolean isIPInUse(String ipAddress) {\r
- if (ipAddress.equals(gatewayIP) && !gatewayIPAssigned )\r
+ if (ipAddress.equals(gatewayIP) && !gatewayIPAssigned ) {\r
return false;\r
+ }\r
Iterator<NeutronSubnet_IPAllocationPool> i = allocationPools.iterator();\r
while (i.hasNext()) {\r
NeutronSubnet_IPAllocationPool pool = i.next();\r
- if (pool.contains(ipAddress))\r
+ if (pool.contains(ipAddress)) {\r
return false;\r
+ }\r
}\r
return true;\r
}\r
}\r
else\r
if (NeutronSubnet_IPAllocationPool.convert(pool.getPoolStart()) <\r
- NeutronSubnet_IPAllocationPool.convert(ans))\r
+ NeutronSubnet_IPAllocationPool.convert(ans)) {\r
ans = pool.getPoolStart();\r
+ }\r
}\r
return ans;\r
}\r
if (pool.contains(ipAddress)) {\r
List<NeutronSubnet_IPAllocationPool> pools = pool.splitPool(ipAddress);\r
newList.addAll(pools);\r
- } else\r
+ } else {\r
newList.add(pool);\r
+ }\r
}\r
}\r
allocationPools = newList;\r
NeutronSubnet_IPAllocationPool pool = i.next();\r
long lIP = NeutronSubnet_IPAllocationPool.convert(pool.getPoolStart());\r
long hIP = NeutronSubnet_IPAllocationPool.convert(pool.getPoolEnd());\r
- if (sIP+1 == lIP)\r
+ if (sIP+1 == lIP) {\r
hPool = pool;\r
- if (sIP-1 == hIP)\r
+ }\r
+ if (sIP-1 == hIP) {\r
lPool = pool;\r
+ }\r
}\r
//if (lPool == NULL and hPool == NULL) create new pool where low = ip = high\r
- if (lPool == null && hPool == null)\r
+ if (lPool == null && hPool == null) {\r
allocationPools.add(new NeutronSubnet_IPAllocationPool(ipAddress,ipAddress));\r
+ }\r
//if (lPool == NULL and hPool != NULL) change low address of hPool to ipAddr\r
- if (lPool == null && hPool != null)\r
+ if (lPool == null && hPool != null) {\r
hPool.setPoolStart(ipAddress);\r
+ }\r
//if (lPool != NULL and hPool == NULL) change high address of lPool to ipAddr\r
- if (lPool != null && hPool == null)\r
+ if (lPool != null && hPool == null) {\r
lPool.setPoolEnd(ipAddress);\r
+ }\r
//if (lPool != NULL and hPool != NULL) remove lPool and hPool and create new pool\r
// where low address = lPool.low address and high address = hPool.high Address\r
if (lPool != null && hPool != null) {\r
import java.util.Map;
import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
* @param nodeId Node Identifier of the node with the management session.
* @param bridgeName Name / Identifier for a bridge to be created.
* @param bridgeConfigs Additional Bridge Configurations.
+ * It takes in complex structures under the ConfigConstants.CUSTOM key.
+ * The use-cases are documented under wiki.opendaylight.org project pages:
+ * https://wiki.opendaylight.org/view/OVSDB_Integration:Mininet_OVSDB_Tutorial
*/
@Path("/bridge/{nodeType}/{nodeId}/{bridgeName}")
throw new ResourceNotFoundException(status.getDescription());
}
+
+ /**
+ * Remove a Bridge.
+ * <pre>
+ *
+ * Example :
+ *
+ * Request :
+ * DELETE
+ * http://localhost:8080/controller/nb/v2/networkconfig/bridgedomain/bridge/STUB/mgmt1/bridge1
+ *
+ *</pre>
+ * @param nodeType Node Type of the node with the management session.
+ * @param nodeId Node Identifier of the node with the management session.
+ * @param bridgeName Name / Identifier for a bridge to be deleted.
+ */
+
+ @Path("/bridge/{nodeType}/{nodeId}/{bridgeName}")
+ @DELETE
+ @StatusCodes( { @ResponseCode(code = 200, condition = "Bridge deleted successfully"),
+ @ResponseCode(code = 404, condition = "Could not delete Bridge"),
+ @ResponseCode(code = 412, condition = "Failed to delete Bridge due to an exception"),
+ @ResponseCode(code = 503, condition = "Bridge Domain Configuration Service not available")} )
+
+ public Response deleteBridge(
+ @PathParam(value = "nodeType") String nodeType,
+ @PathParam(value = "nodeId") String nodeId,
+ @PathParam(value = "bridgeName") String name) {
+
+ IBridgeDomainConfigService configurationService = getConfigurationService();
+ if (configurationService == null) {
+ throw new ServiceUnavailableException("IBridgeDomainConfigService not available.");
+ }
+
+ Node node = Node.fromString(nodeType, nodeId);
+ Status status = null;
+ try {
+ status = configurationService.deleteBridgeDomain(node, name);
+ if (status.getCode().equals(StatusCode.SUCCESS)) {
+ return Response.status(Response.Status.OK).build();
+ }
+ } catch (Throwable t) {
+ return Response.status(Response.Status.PRECONDITION_FAILED).build();
+ }
+ throw new ResourceNotFoundException(status.getDescription());
+ }
+
/**
* Add a Port to a Bridge
* <pre>
* @param bridgeName Name / Identifier of the bridge to which a Port is being added.
* @param portName Name / Identifier of a Port that is being added to a bridge.
* @param portConfigs Additional Port Configurations.
+ * It takes in complex structures under the ConfigConstants.CUSTOM key.
+ * The use-cases are documented under wiki.opendaylight.org project pages :
+ * https://wiki.opendaylight.org/view/OVSDB_Integration:Mininet_OVSDB_Tutorial
*/
@Path("/port/{nodeType}/{nodeId}/{bridgeName}/{portName}")
throw new ResourceNotFoundException(status.getDescription());
}
+ /**
+ * Remove a Port from a Bridge
+ * <pre>
+ *
+ * Example :
+ *
+ * Request :
+ * DELETE
+ * http://localhost:8080/controller/nb/v2/networkconfig/bridgedomain/port/STUB/mgmt1/bridge1/port1
+ *
+ *</pre>
+ * @param nodeType Node Type of the node with the management session.
+ * @param nodeId Node Identifier of the node with the management session.
+     * @param bridgeName Name / Identifier of the bridge from which a Port is being deleted.
+ * @param portName Name / Identifier of a Port that is being deleted from a bridge.
+ */
+
+ @Path("/port/{nodeType}/{nodeId}/{bridgeName}/{portName}")
+ @DELETE
+ @Consumes({MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON})
+ @StatusCodes( { @ResponseCode(code = 200, condition = "Port deleted successfully"),
+    @ResponseCode(code = 404, condition = "Could not delete Port from the Bridge"),
+ @ResponseCode(code = 412, condition = "Failed to delete Port due to an exception"),
+ @ResponseCode(code = 503, condition = "Bridge Domain Configuration Service not available")} )
+
+ public Response deletePort(
+ @PathParam(value = "nodeType") String nodeType,
+ @PathParam(value = "nodeId") String nodeId,
+ @PathParam(value = "bridgeName") String bridge,
+ @PathParam(value = "portName") String port) {
+
+ IBridgeDomainConfigService configurationService = getConfigurationService();
+ if (configurationService == null) {
+ throw new ServiceUnavailableException("IBridgeDomainConfigService not available.");
+ }
+
+ Node node = Node.fromString(nodeType, nodeId);
+ Status status = null;
+ try {
+ status = configurationService.deletePort(node, bridge, port);
+ if (status.getCode().equals(StatusCode.SUCCESS)) {
+ return Response.status(Response.Status.OK).build();
+ }
+ } catch (Throwable t) {
+ return Response.status(Response.Status.PRECONDITION_FAILED).build();
+ }
+ throw new ResourceNotFoundException(status.getDescription());
+ }
+
private Map<ConfigConstants, Object> buildConfig(Map<String, Object> rawConfigs) {
if (rawConfigs == null) return null;
Map<ConfigConstants, Object> configs = new HashMap<ConfigConstants, Object>();
</init-param>
<init-param>
<param-name>cors.allowed.methods</param-name>
- <param-value>GET,POST,HEAD,OPTIONS,PUT</param-value>
+ <param-value>GET,POST,DELETE,HEAD,OPTIONS,PUT</param-value>
</init-param>
<init-param>
<param-name>cors.allowed.headers</param-name>
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;\r
import org.opendaylight.controller.networkconfig.neutron.NeutronSubnet;\r
import org.opendaylight.controller.northbound.commons.RestMessages;\r
+import org.opendaylight.controller.northbound.commons.exception.InternalServerErrorException;\r
import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;\r
import org.opendaylight.controller.sal.utils.ServiceHelper;\r
\r
(queryGatewayIP == null || queryGatewayIP.equals(oSS.getGatewayIP())) &&\r
(queryEnableDHCP == null || queryEnableDHCP.equals(oSS.getEnableDHCP())) &&\r
(queryTenantID == null || queryTenantID.equals(oSS.getTenantID()))) {\r
- if (fields.size() > 0)\r
+ if (fields.size() > 0) {\r
ans.add(extractFields(oSS,fields));\r
- else\r
+ } else {\r
ans.add(oSS);\r
+ }\r
}\r
}\r
//TODO: apply pagination to results\r
throw new ServiceUnavailableException("Subnet CRUD Interface "\r
+ RestMessages.SERVICEUNAVAILABLE.toString());\r
}\r
- if (!subnetInterface.subnetExists(subnetUUID))\r
+ if (!subnetInterface.subnetExists(subnetUUID)) {\r
return Response.status(404).build();\r
+ }\r
if (fields.size() > 0) {\r
NeutronSubnet ans = subnetInterface.getSubnet(subnetUUID);\r
return Response.status(200).entity(\r
new NeutronSubnetRequest(extractFields(ans, fields))).build();\r
- } else\r
+ } else {\r
return Response.status(200).entity(\r
new NeutronSubnetRequest(subnetInterface.getSubnet(subnetUUID))).build();\r
+ }\r
}\r
\r
/**\r
* and that the gateway IP doesn't overlap with the allocation pools\r
* *then* add the subnet to the cache\r
*/\r
- if (subnetInterface.subnetExists(singleton.getID()))\r
+ if (subnetInterface.subnetExists(singleton.getID())) {\r
return Response.status(400).build();\r
- if (!networkInterface.networkExists(singleton.getNetworkUUID()))\r
+ }\r
+ if (!networkInterface.networkExists(singleton.getNetworkUUID())) {\r
return Response.status(404).build();\r
- if (!singleton.isValidCIDR())\r
+ }\r
+ if (!singleton.isValidCIDR()) {\r
return Response.status(400).build();\r
- singleton.initDefaults();\r
- if (singleton.gatewayIP_Pool_overlap())\r
+ }\r
+ if (!singleton.initDefaults()) {\r
+ throw new InternalServerErrorException("subnet object could not be initialized properly");\r
+ }\r
+ if (singleton.gatewayIP_Pool_overlap()) {\r
return Response.status(409).build();\r
+ }\r
Object[] instances = ServiceHelper.getGlobalInstances(INeutronSubnetAware.class, this, null);\r
if (instances != null) {\r
for (Object instance : instances) {\r
INeutronSubnetAware service = (INeutronSubnetAware) instance;\r
int status = service.canCreateSubnet(singleton);\r
- if (status < 200 || status > 299)\r
+ if (status < 200 || status > 299) {\r
return Response.status(status).build();\r
+ }\r
}\r
}\r
subnetInterface.addSubnet(singleton);\r
* and that the bulk request doesn't already contain a subnet with this id\r
*/\r
\r
- test.initDefaults();\r
- if (subnetInterface.subnetExists(test.getID()))\r
+ if (!test.initDefaults()) {\r
+ throw new InternalServerErrorException("subnet object could not be initialized properly");\r
+ }\r
+ if (subnetInterface.subnetExists(test.getID())) {\r
return Response.status(400).build();\r
- if (testMap.containsKey(test.getID()))\r
+ }\r
+ if (testMap.containsKey(test.getID())) {\r
return Response.status(400).build();\r
+ }\r
testMap.put(test.getID(), test);\r
- if (!networkInterface.networkExists(test.getNetworkUUID()))\r
+ if (!networkInterface.networkExists(test.getNetworkUUID())) {\r
return Response.status(404).build();\r
- if (!test.isValidCIDR())\r
+ }\r
+ if (!test.isValidCIDR()) {\r
return Response.status(400).build();\r
- if (test.gatewayIP_Pool_overlap())\r
+ }\r
+ if (test.gatewayIP_Pool_overlap()) {\r
return Response.status(409).build();\r
+ }\r
if (instances != null) {\r
for (Object instance : instances) {\r
INeutronSubnetAware service = (INeutronSubnetAware) instance;\r
int status = service.canCreateSubnet(test);\r
- if (status < 200 || status > 299)\r
+ if (status < 200 || status > 299) {\r
return Response.status(status).build();\r
+ }\r
}\r
}\r
}\r
/*\r
* verify the subnet exists and there is only one delta provided\r
*/\r
- if (!subnetInterface.subnetExists(subnetUUID))\r
+ if (!subnetInterface.subnetExists(subnetUUID)) {\r
return Response.status(404).build();\r
- if (!input.isSingleton())\r
+ }\r
+ if (!input.isSingleton()) {\r
return Response.status(400).build();\r
+ }\r
NeutronSubnet delta = input.getSingleton();\r
NeutronSubnet original = subnetInterface.getSubnet(subnetUUID);\r
\r
*/\r
if (delta.getID() != null || delta.getTenantID() != null ||\r
delta.getIpVersion() != null || delta.getCidr() != null ||\r
- delta.getAllocationPools() != null)\r
+ delta.getAllocationPools() != null) {\r
return Response.status(400).build();\r
+ }\r
\r
Object[] instances = ServiceHelper.getGlobalInstances(INeutronSubnetAware.class, this, null);\r
if (instances != null) {\r
for (Object instance : instances) {\r
INeutronSubnetAware service = (INeutronSubnetAware) instance;\r
int status = service.canUpdateSubnet(delta, original);\r
- if (status < 200 || status > 299)\r
+ if (status < 200 || status > 299) {\r
return Response.status(status).build();\r
+ }\r
}\r
}\r
\r
/*\r
* verify the subnet exists and it isn't currently in use\r
*/\r
- if (!subnetInterface.subnetExists(subnetUUID))\r
+ if (!subnetInterface.subnetExists(subnetUUID)) {\r
return Response.status(404).build();\r
- if (subnetInterface.subnetInUse(subnetUUID))\r
+ }\r
+ if (subnetInterface.subnetInUse(subnetUUID)) {\r
return Response.status(409).build();\r
+ }\r
NeutronSubnet singleton = subnetInterface.getSubnet(subnetUUID);\r
Object[] instances = ServiceHelper.getGlobalInstances(INeutronSubnetAware.class, this, null);\r
if (instances != null) {\r
for (Object instance : instances) {\r
INeutronSubnetAware service = (INeutronSubnetAware) instance;\r
int status = service.canDeleteSubnet(singleton);\r
- if (status < 200 || status > 299)\r
+ if (status < 200 || status > 299) {\r
return Response.status(status).build();\r
+ }\r
}\r
}\r
\r
Set<Node> nodes = connectionManager.getLocalNodes();
List<NodeJsonBean> result = new LinkedList<NodeJsonBean>();
+ if (nodes == null) {
+ return result;
+ }
for (Node node : nodes) {
Description descriptionProperty = (Description) switchManager.getNodeProp(node, "description");
- String description = descriptionProperty.getValue();
+ String description = node.toString();
+ if (descriptionProperty != null) {
+ description = descriptionProperty.getValue();
+ }
NodeJsonBean nodeBean = new NodeJsonBean();
nodeBean.setNodeId(node.getNodeIDString());
nodeBean.setNodeType(node.getType());
return "forward:" + "/";
}
-}
\ No newline at end of file
+}