This change will be checked in after review 2617 is completed.
Added Flow Add/delete RPC methods.
Signed-off-by: Tony Tkacik <ttkacik@cisco.com>
Signed-off-by: Gaurav Bhagwani <gaurav.bhagwani@ericsson.com>
Change-Id: I01f65c956adb5b9d28e08f3fec2985023ee7f2e4
<extensions>true</extensions>
<configuration>
<instructions>
- <Import-Package>
- org.opendaylight.controller.sal.binding.api,
- org.opendaylight.controller.sal.binding.api.data,
- org.opendaylight.controller.md.sal.common.api.data,
- org.opendaylight.controller.sal.utils,
- org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow,
- org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.groups,
- org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918,
- org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018,
- org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group,
- org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets,
- org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets.bucket,
- org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819,
- org.opendaylight.controller.clustering.services, org.opendaylight.controller.sal.core,
- org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction,
- org.opendaylight.controller.switchmanager,
- org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list,
- org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024,
- org.opendaylight.yangtools.concepts,
- org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819,
- org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819,
- org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes,
- org.opendaylight.controller.sal.common.util,
- org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows,
- org.opendaylight.yangtools.yang.common,
- org.opendaylight.yangtools.yang.binding,
- org.apache.commons.lang3.builder,
- org.apache.commons.lang3.tuple,
- org.apache.felix.dm,
- org.slf4j,
- org.eclipse.osgi.framework.console,
- org.osgi.framework,
- javax.net.ssl
- </Import-Package>
-
-
<Embed-Transitive>
false
</Embed-Transitive>
<dependency>
<groupId>equinoxSDK381</groupId>
<artifactId>org.eclipse.osgi</artifactId>
- </dependency>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-api</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>flow-management-compatibility</artifactId>
+ <version>1.0-SNAPSHOT</version>
+ </dependency>
<dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-api</artifactId>
- <version>1.0-SNAPSHOT</version>
- </dependency>
- <dependency>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-flow-service</artifactId>
<version>1.0-SNAPSHOT</version>
</dependency>
- <dependency>
+ <dependency>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-flow-management</artifactId>
<version>1.0-SNAPSHOT</version>
<version>1.0-SNAPSHOT</version>
<scope>provided</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services</artifactId>
- <version>0.4.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>0.5.1-SNAPSHOT</version>
- </dependency>
</dependencies>
</project>
-
/*
 * Copyright (c) 2013 Ericsson, Inc. and others. All rights reserved.
 */
package org.opendaylight.controller.forwardingrulesmanager_mdsal.consumer.impl;
-
import org.eclipse.osgi.framework.console.CommandProvider;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
-public class FRMConsumerImpl extends AbstractBindingAwareProvider implements CommandProvider{
- protected static final Logger logger = LoggerFactory.getLogger(FRMConsumerImpl.class);
- private static ProviderContext p_session;
- private static DataBrokerService dataBrokerService;
- private static NotificationService notificationService;
+public class FRMConsumerImpl extends AbstractBindingAwareProvider implements CommandProvider {
+ protected static final Logger logger = LoggerFactory.getLogger(FRMConsumerImpl.class);
+ private static ProviderContext p_session;
+ private static DataBrokerService dataBrokerService;
+ private static NotificationService notificationService;
private FlowConsumerImpl flowImplRef;
- private GroupConsumerImpl groupImplRef;
- private static DataProviderService dataProviderService;
-
- private static IClusterContainerServices clusterContainerService = null;
- private static ISwitchManager switchManager;
- private static IContainer container;
-
- @Override
+ private GroupConsumerImpl groupImplRef;
+ private static DataProviderService dataProviderService;
+
+ private static IClusterContainerServices clusterContainerService = null;
+ private static ISwitchManager switchManager;
+ private static IContainer container;
+
+ @Override
public void onSessionInitiated(ProviderContext session) {
-
+
FRMConsumerImpl.p_session = session;
-
+
if (!getDependentModule()) {
logger.error("Unable to fetch handlers for dependent modules");
System.out.println("Unable to fetch handlers for dependent modules");
return;
}
-
+
if (null != session) {
- notificationService = session.getSALService(NotificationService.class);
-
- if (null != notificationService) {
- dataBrokerService = session.getSALService(DataBrokerService.class);
-
- if (null != dataBrokerService) {
- dataProviderService = session.getSALService(DataProviderService.class);
-
- if (null != dataProviderService) {
- flowImplRef = new FlowConsumerImpl();
- // groupImplRef = new GroupConsumerImpl();
- registerWithOSGIConsole();
- }
- else {
- logger.error("Data Provider Service is down or NULL. " +
- "Accessing data from configuration data store will not be possible");
- System.out.println("Data Broker Service is down or NULL.");
- }
-
- }
- else {
- logger.error("Data Broker Service is down or NULL.");
- System.out.println("Data Broker Service is down or NULL.");
- }
- }
- else {
- logger.error("Notification Service is down or NULL.");
- System.out.println("Notification Service is down or NULL.");
- }
- }
- else {
- logger.error("Consumer session is NULL. Please check if provider is registered");
- System.out.println("Consumer session is NULL. Please check if provider is registered");
+ notificationService = session.getSALService(NotificationService.class);
+
+ if (null != notificationService) {
+ dataBrokerService = session.getSALService(DataBrokerService.class);
+
+ if (null != dataBrokerService) {
+ dataProviderService = session.getSALService(DataProviderService.class);
+
+ if (null != dataProviderService) {
+ flowImplRef = new FlowConsumerImpl();
+ // groupImplRef = new GroupConsumerImpl();
+ registerWithOSGIConsole();
+ } else {
+ logger.error("Data Provider Service is down or NULL. "
+ + "Accessing data from configuration data store will not be possible");
+ System.out.println("Data Broker Service is down or NULL.");
+ }
+
+ } else {
+ logger.error("Data Broker Service is down or NULL.");
+ System.out.println("Data Broker Service is down or NULL.");
+ }
+ } else {
+ logger.error("Notification Service is down or NULL.");
+ System.out.println("Notification Service is down or NULL.");
+ }
+ } else {
+ logger.error("Consumer session is NULL. Please check if provider is registered");
+ System.out.println("Consumer session is NULL. Please check if provider is registered");
}
-
+
}
-
- public static IClusterContainerServices getClusterContainerService() {
+
+ public static IClusterContainerServices getClusterContainerService() {
return clusterContainerService;
}
- public static void setClusterContainerService(
- IClusterContainerServices clusterContainerService) {
+ public static void setClusterContainerService(IClusterContainerServices clusterContainerService) {
FRMConsumerImpl.clusterContainerService = clusterContainerService;
}
BundleContext bundleContext = FrameworkUtil.getBundle(this.getClass()).getBundleContext();
bundleContext.registerService(CommandProvider.class.getName(), this, null);
}
-
- private boolean getDependentModule() {
- do {
- clusterContainerService = (IClusterContainerServices) ServiceHelper.getGlobalInstance(IClusterContainerServices.class, this);
- try {
- Thread.sleep(4);
- } catch (InterruptedException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- } while(clusterContainerService == null);
-
- do {
-
-
- container = (IContainer) ServiceHelper.getGlobalInstance(IContainer.class, this);
- try {
- Thread.sleep(5);
- } catch (InterruptedException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- } while (container == null);
-
- do {
- switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, container.getName(), this);
- try {
- Thread.sleep(5);
- } catch (InterruptedException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- } while(null == switchManager);
- return true;
- }
-
+ private boolean getDependentModule() {
+ do {
+ clusterContainerService = (IClusterContainerServices) ServiceHelper.getGlobalInstance(
+ IClusterContainerServices.class, this);
+ try {
+ Thread.sleep(4);
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ } while (clusterContainerService == null);
+
+ do {
+
+ container = (IContainer) ServiceHelper.getGlobalInstance(IContainer.class, this);
+ try {
+ Thread.sleep(5);
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ } while (container == null);
+
+ do {
+ switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, container.getName(), this);
+ try {
+ Thread.sleep(5);
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ } while (null == switchManager);
+ return true;
+ }
public static DataProviderService getDataProviderService() {
- return dataProviderService;
- }
-
- public FlowConsumerImpl getFlowImplRef() {
- return flowImplRef;
- }
-
- public GroupConsumerImpl getGroupImplRef() {
- return groupImplRef;
- }
-
- public static ProviderContext getProviderSession() {
- return p_session;
- }
-
- public static NotificationService getNotificationService() {
- return notificationService;
- }
-
- public static DataBrokerService getDataBrokerService() {
- return dataBrokerService;
- }
-
- /*
+ return dataProviderService;
+ }
+
+ public FlowConsumerImpl getFlowImplRef() {
+ return flowImplRef;
+ }
+
+ public GroupConsumerImpl getGroupImplRef() {
+ return groupImplRef;
+ }
+
+ public static ProviderContext getProviderSession() {
+ return p_session;
+ }
+
+ public static NotificationService getNotificationService() {
+ return notificationService;
+ }
+
+ public static DataBrokerService getDataBrokerService() {
+ return dataBrokerService;
+ }
+
+ /*
* OSGI COMMANDS
*/
@Override
}
}
-
package org.opendaylight.controller.forwardingrulesmanager_mdsal.consumer.impl;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.buckets.bucket.Actions;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.opendaylight.controller.sal.utils.IPProtocols;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.PortNumber;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.ControllerAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.OutputAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.PushMplsAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.PushPbbAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.PushVlanAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetDlDstAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetDlSrcAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetQueueAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetTpDstAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetTpSrcAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetVlanIdAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.SetVlanPcpAction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Instructions;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ApplyActions;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.ClearActions;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.GoToTable;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.instruction.WriteActions;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.VlanPcp;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.IpMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.VlanMatch;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
public class FRMUtil {
+ protected static final Logger logger = LoggerFactory.getLogger(FRMUtil.class);
private static final String NAMEREGEX = "^[a-zA-Z0-9]+$";
- public enum operation {ADD, DELETE, UPDATE, GET};
-
-
+
+ public static enum operation {
+ ADD, DELETE, UPDATE, GET
+ };
+
public static boolean isNameValid(String name) {
-
- // Name validation
+
+ // Name validation
if (name == null || name.trim().isEmpty() || !name.matches(NAMEREGEX)) {
return false;
}
return true;
-
+
+ }
+
+ public static boolean validateMatch(NodeFlow flow) {
+ Match match = flow.getMatch();
+ if (match != null) {
+ EthernetMatch ethernetmatch = match.getEthernetMatch();
+ IpMatch ipmatch = match.getIpMatch();
+ VlanMatch vlanmatch = match.getVlanMatch();
+ match.getIcmpv4Match();
+
+ if (ethernetmatch != null) {
+ if ((ethernetmatch.getEthernetSource() != null)
+ && !isL2AddressValid(ethernetmatch.getEthernetSource().toString())) {
+
+ logger.error("Ethernet source address %s is not valid. Example: 00:05:b9:7c:81:5f",
+ ethernetmatch.getEthernetSource());
+ return false;
+ }
+
+ if ((ethernetmatch.getEthernetDestination() != null)
+ && !isL2AddressValid(ethernetmatch.getEthernetDestination().toString())) {
+ logger.error("Ethernet destination address %s is not valid. Example: 00:05:b9:7c:81:5f",
+ ethernetmatch.getEthernetDestination());
+ return false;
+ }
+
+ if (ethernetmatch.getEthernetType() != null) {
+ int type = Integer.decode(ethernetmatch.getEthernetType().toString());
+ if ((type < 0) || (type > 0xffff)) {
+ logger.error("Ethernet type is not valid");
+ return false;
+ }
+ }
+ } else if (ipmatch != null) {
+ if (ipmatch.getIpProtocol() != null && isProtocolValid(ipmatch.getIpProtocol().toString())) {
+ logger.error("Protocol is not valid");
+ return false;
+ }
+ } else if (vlanmatch != null) {
+ if (vlanmatch.getVlanId() != null && isVlanIdValid(vlanmatch.getVlanId().toString())) {
+ logger.error("Vlan ID is not in the range 0 - 4095");
+ return false;
+ }
+
+ if (vlanmatch.getVlanPcp() != null && isVlanPriorityValid(vlanmatch.getVlanPcp().toString())) {
+ logger.error("Vlan priority is not in the range 0 - 7");
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ public static boolean validateActions(List<Action> actions) {
+
+ if (actions == null || actions.isEmpty()) {
+ logger.error("Actions value is null or empty");
+ return false;
+ }
+
+ for (Action curaction : actions) {
+ org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.Action action = curaction
+ .getAction();
+ if (action instanceof ControllerAction) {
+ Integer length = ((ControllerAction) action).getMaxLength();
+ if (length < 0 || length > 65294) {
+ logger.error("Controller: MaxLength is not valid");
+ return false;
+ }
+ } else if (action instanceof OutputAction) {
+ Integer length = ((OutputAction) action).getMaxLength();
+ List<Uri> outputnodeconnector = ((OutputAction) action).getOutputNodeConnector();
+ if (length < 0 || length > 65294) {
+ logger.error("OutputAction: MaxLength is not valid");
+ return false;
+ }
+ if (outputnodeconnector != null) {
+ // TODO
+ }
+ } else if (action instanceof PushMplsAction) {
+ Integer ethertype = ((PushMplsAction) action).getEthernetType();
+ if (ethertype != null && ethertype != 0x8847 && ethertype != 0x8848) {
+ logger.error("Ether Type is not valid for PushMplsAction");
+ return false;
+ }
+ } else if (action instanceof PushPbbAction) {
+ Integer ethertype = ((PushPbbAction) action).getEthernetType();
+ if (ethertype != null && ethertype != 0x88E7) {
+ logger.error("Ether type is not valid for PushPbbAction");
+ return false;
+ }
+ } else if (action instanceof PushVlanAction) {
+ Integer ethertype = ((PushVlanAction) action).getEthernetType();
+ if (ethertype != null && ethertype != 0x8100 && ethertype != 0x88a8) {
+ logger.error("Ether Type is not valid for PushVlanAction");
+ return false;
+ }
+ } else if (action instanceof SetDlDstAction || action instanceof SetDlSrcAction) {
+ MacAddress address = ((SetDlDstAction) action).getAddress();
+ if (address != null && !isL2AddressValid(address.toString())) {
+ logger.error("SetDlDstAction: Address not valid");
+ return false;
+ }
+ } else if (action instanceof SetDlSrcAction) {
+ MacAddress address = ((SetDlSrcAction) action).getAddress();
+ if (address != null && !isL2AddressValid(address.toString())) {
+ logger.error("SetDlSrcAction: Address not valid");
+ return false;
+ }
+ } else if (action instanceof SetQueueAction) {
+ String queue = ((SetQueueAction) action).getQueue();
+ if (queue != null && !isQueueValid(queue)) {
+ logger.error("Queue Id not valid");
+ return false;
+ }
+ } else if (action instanceof SetTpDstAction) {
+ PortNumber port = ((SetTpDstAction) action).getPort();
+ if (port != null && !isPortValid(port)) {
+ logger.error("Port not valid");
+ }
+ } else if (action instanceof SetTpSrcAction) {
+ PortNumber port = ((SetTpSrcAction) action).getPort();
+ if (port != null && !isPortValid(port)) {
+ logger.error("Port not valid");
+ }
+ } else if (action instanceof SetVlanIdAction) {
+ VlanId vlanid = ((SetVlanIdAction) action).getVlanId();
+ if (vlanid != null && !isVlanIdValid(vlanid.toString())) {
+ logger.error("Vlan ID %s is not in the range 0 - 4095");
+ return false;
+ }
+ } else if (action instanceof SetVlanPcpAction) {
+ VlanPcp vlanpcp = ((SetVlanPcpAction) action).getVlanPcp();
+ if (vlanpcp != null && !isVlanPriorityValid(vlanpcp.toString())) {
+ logger.error("Vlan priority %s is not in the range 0 - 7");
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ public static boolean validateInstructions(NodeFlow flow) {
+ List<Instruction> instructionsList = new ArrayList<>();
+ Instructions instructions = flow.getInstructions();
+ instructionsList = instructions.getInstruction();
+
+ for (Instruction instruction : instructionsList) {
+ org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.Instruction curInstruction = instruction
+ .getInstruction();
+ if (curInstruction instanceof GoToTable) {
+
+ Short tableid = ((GoToTable) curInstruction).getTableId();
+ if (tableid < 0) {
+ logger.error("table id is not valid");
+ return false;
+ }
+ }
+
+ else if (curInstruction instanceof WriteActions) {
+
+ List<Action> action = ((WriteActions) curInstruction).getAction();
+ validateActions(action);
+
+ }
+
+ else if (curInstruction instanceof ApplyActions) {
+ List<Action> action = ((ApplyActions) curInstruction).getAction();
+ validateActions(action);
+ }
+
+ else if (curInstruction instanceof ClearActions) {
+ List<Action> action = ((ClearActions) curInstruction).getAction();
+ validateActions(action);
+ }
+
+ else if (curInstruction instanceof Meter) {
+
+ String meter = ((Meter) curInstruction).getMeter();
+ if (meter != null && !isValidMeter(meter)) {
+ logger.error("Meter Id is not valid");
+ return false;
+ }
+ }
+
+ }
+
+ return true;
+ }
+
+ public static boolean isValidMeter(String meter) {
+ // TODO
+ return true;
+ }
+
+ public static boolean isQueueValid(String queue) {
+ // TODO
+ return true;
}
-
- public static boolean areActionsValid(Actions actions) {
- // List<Action> actionList;
- // Action actionRef;
- // if (null != actions && null != actions.getAction()) {
- // actionList = actions.getAction();
-
-
-
- // }
-
+ public static boolean isPortValid(PortNumber port) {
+ // TODO
return true;
}
+
+ public static boolean isL2AddressValid(String mac) {
+ if (mac == null) {
+ return false;
+ }
+
+ Pattern macPattern = Pattern.compile("([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}");
+ Matcher mm = macPattern.matcher(mac);
+ if (!mm.matches()) {
+ logger.debug("Ethernet address {} is not valid. Example: 00:05:b9:7c:81:5f", mac);
+ return false;
+ }
+ return true;
+ }
+
+ public static boolean isProtocolValid(String protocol) {
+ IPProtocols proto = IPProtocols.fromString(protocol);
+ return (proto != null);
+ }
+
+ public static boolean isVlanIdValid(String vlanId) {
+ int vlan = Integer.decode(vlanId);
+ return ((vlan >= 0) && (vlan < 4096));
+ }
+
+ public static boolean isVlanPriorityValid(String vlanPriority) {
+ int pri = Integer.decode(vlanPriority);
+ return ((pri >= 0) && (pri < 8));
+ }
}
package org.opendaylight.controller.forwardingrulesmanager_mdsal.consumer.impl;
import java.util.ArrayList;
-import java.util.Collection;
+import java.util.EnumSet;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
import org.opendaylight.controller.sal.common.util.Rpcs;
+import org.opendaylight.controller.sal.core.IContainer;
+import org.opendaylight.controller.sal.utils.GlobalConstants;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+import org.opendaylight.controller.sal.utils.Status;
+import org.opendaylight.controller.sal.utils.StatusCode;
+import org.opendaylight.controller.switchmanager.ISwitchManager;
+import org.opendaylight.controller.switchmanager.Switch;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.Flows;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
-
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowAdded;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.FlowUpdated;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.NodeFlow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.Instruction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.config.rev130819.flows.FlowKey;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.LoggerFactory;
public class FlowConsumerImpl {
- protected static final Logger logger = LoggerFactory.getLogger(FlowConsumerImpl.class);
- private FlowEventListener flowEventListener = new FlowEventListener();
+ protected static final Logger logger = LoggerFactory.getLogger(FlowConsumerImpl.class);
+ private FlowEventListener flowEventListener = new FlowEventListener();
private Registration<NotificationListener> listener1Reg;
- private SalFlowService flowService;
- private FlowDataListener listener;
- private FlowDataCommitHandler commitHandler;
- private ConcurrentHashMap<FlowKey, Flow> originalSwView;
-
- public FlowConsumerImpl() {
- InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Flows.class).toInstance();
- flowService = FRMConsumerImpl.getProviderSession().getRpcService(SalFlowService.class);
-
- if (null == flowService) {
- logger.error("Consumer SAL Service is down or NULL. FRM may not function as intended");
- System.out.println("Consumer SAL Service is down or NULL.");
- return;
- }
-
- listener = new FlowDataListener();
-
- if (null == FRMConsumerImpl.getDataBrokerService().registerDataChangeListener(path, listener)) {
- logger.error("Failed to listen on flow data modifcation events");
- System.out.println("Consumer SAL Service is down or NULL.");
- return;
- }
-
- // For switch events
- listener1Reg = FRMConsumerImpl.getNotificationService().registerNotificationListener(flowEventListener);
-
- if (null == listener1Reg) {
- logger.error("Listener to listen on flow data modifcation events");
- System.out.println("Consumer SAL Service is down or NULL.");
- return;
- }
- //addFlowTest();
- System.out.println("-------------------------------------------------------------------");
- allocateCaches();
- commitHandler = new FlowDataCommitHandler();
- FRMConsumerImpl.getDataProviderService().registerCommitHandler(path, commitHandler);
+ private SalFlowService flowService;
+ // private FlowDataListener listener;
+ private FlowDataCommitHandler commitHandler;
+ private static ConcurrentHashMap<FlowKey, Flow> originalSwView;
+ private static ConcurrentMap<FlowKey, Flow> installedSwView;
+ private IClusterContainerServices clusterContainerService = null;
+ private IContainer container;
+ private static final String NAMEREGEX = "^[a-zA-Z0-9]+$";
+ private static ConcurrentMap<Integer, Flow> staticFlows;
+ private static ConcurrentMap<Integer, Integer> staticFlowsOrdinal;
+ /*
+ * Inactive flow list. This is for the global instance of FRM It will
+ * contain all the flow entries which were installed on the global container
+ * when the first container is created.
+ */
+ private static ConcurrentMap<FlowKey, Flow> inactiveFlows;
+
+ /*
+ * /* Per node indexing
+ */
+ private static ConcurrentMap<Node, List<Flow>> nodeFlows;
+ private boolean inContainerMode; // being used by global instance only
+
+ public FlowConsumerImpl() {
+ InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Flows.class).toInstance();
+ flowService = FRMConsumerImpl.getProviderSession().getRpcService(SalFlowService.class);
+
+ if (null == flowService) {
+ logger.error("Consumer SAL Service is down or NULL. FRM may not function as intended");
+ System.out.println("Consumer SAL Service is down or NULL.");
+ return;
+ }
+
+ // listener = new FlowDataListener();
+
+ // if (null ==
+ // FRMConsumerImpl.getDataBrokerService().registerDataChangeListener(path,
+ // listener)) {
+ // logger.error("Failed to listen on flow data modifcation events");
+ // System.out.println("Consumer SAL Service is down or NULL.");
+ // return;
+ // }
+
+ // For switch events
+ listener1Reg = FRMConsumerImpl.getNotificationService().registerNotificationListener(flowEventListener);
+
+ if (null == listener1Reg) {
+ logger.error("Listener to listen on flow data modifcation events");
+ System.out.println("Consumer SAL Service is down or NULL.");
+ return;
+ }
+ // addFlowTest();
+ System.out.println("-------------------------------------------------------------------");
+ allocateCaches();
+ commitHandler = new FlowDataCommitHandler();
+ FRMConsumerImpl.getDataProviderService().registerCommitHandler(path, commitHandler);
+ clusterContainerService = (IClusterContainerServices) ServiceHelper.getGlobalInstance(
+ IClusterContainerServices.class, this);
+ container = (IContainer) ServiceHelper.getGlobalInstance(IContainer.class, this);
+ /*
+ * If we are not the first cluster node to come up, do not initialize
+ * the static flow entries ordinal
+ */
+ if (staticFlowsOrdinal.size() == 0) {
+ staticFlowsOrdinal.put(0, Integer.valueOf(0));
+ }
}
-
+
private void allocateCaches() {
- originalSwView = new ConcurrentHashMap<FlowKey, Flow>();
+
+ if (this.clusterContainerService == null) {
+ logger.warn("Un-initialized clusterContainerService, can't create cache");
+ return;
+ }
+
+ try {
+ clusterContainerService.createCache("frm.originalSwView",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("frm.installedSwView",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService
+ .createCache("frm.staticFlows", EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("frm.staticFlowsOrdinal",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("frm.inactiveFlows",
+ EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("frm.nodeFlows", EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache("frm.groupFlows", EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ } catch (CacheConfigException cce) {
+ logger.error("CacheConfigException");
+ } catch (CacheExistException cce) {
+ logger.error("CacheExistException");
+ }
}
-
- private void addFlowTest()
- {
- try {
- NodeRef nodeOne = createNodeRef("foo:node:1");
- AddFlowInputBuilder input1 = new AddFlowInputBuilder();
-
- input1.setNode(nodeOne);
- AddFlowInput firstMsg = input1.build();
-
- if(null != flowService) {
- System.out.println(flowService.toString());
- }
- else
- {
- System.out.println("ConsumerFlowService is NULL");
- }
- @SuppressWarnings("unused")
- Future<RpcResult<java.lang.Void>> result1 = flowService.addFlow(firstMsg);
-
-
- } catch (Exception e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
+
+    /**
+     * Smoke test: pushes a minimal flow for node "foo:node:1" through the
+     * SAL flow RPC service. Intended for manual debugging only.
+     */
+    private void addFlowTest() {
+        try {
+            NodeRef nodeOne = createNodeRef("foo:node:1");
+            AddFlowInputBuilder input1 = new AddFlowInputBuilder();
+
+            input1.setNode(nodeOne);
+            AddFlowInput firstMsg = input1.build();
+
+            // Guard first: the old code warned and then called addFlow anyway,
+            // which would throw NullPointerException when flowService is null.
+            if (null == flowService) {
+                logger.warn("ConsumerFlowService is NULL");
+                return;
+            }
+
+            @SuppressWarnings("unused")
+            Future<RpcResult<AddFlowOutput>> result1 = flowService.addFlow(firstMsg);
+        } catch (Exception e) {
+            // Use the logger instead of printStackTrace so the failure is visible
+            // in the controller log with full context.
+            logger.error("addFlowTest failed", e);
+        }
    }
+
/**
* Adds flow to the southbound plugin and our internal database
*
input.setMatch((dataObject).getMatch());
input.setCookie((dataObject).getCookie());
input.setInstructions((dataObject).getInstructions());
- dataObject.getMatch().getLayer3Match()
- for (int i=0;i<inst.size();i++) {
- System.out.println("i = "+ i + inst.get(i).getInstruction().toString());
- System.out.println("i = "+ i + inst.get(i).toString());
+ dataObject.getMatch().getLayer3Match();
+ for (int i = 0; i < inst.size(); i++) {
+ System.out.println("i = " + i + inst.get(i).getInstruction().toString());
+ System.out.println("i = " + i + inst.get(i).toString());
}
-
+
System.out.println("Instruction list" + (dataObject).getInstructions().getInstruction().toString());
+ // updating the staticflow cache
+ Integer ordinal = staticFlowsOrdinal.get(0);
+ staticFlowsOrdinal.put(0, ++ordinal);
+ staticFlows.put(ordinal, (Flow) dataObject);
+
// We send flow to the sounthbound plugin
flowService.addFlow(input.build());
+ updateLocalDatabase((NodeFlow) dataObject, true);
}
-
+
+    /**
+     * Removes a flow from the southbound plugin and from our internal database.
+     *
+     * @param path identifier of the flow being removed (currently unused here)
+     * @param dataObject the configured flow to remove
+     */
+    private void removeFlow(InstanceIdentifier<?> path, Flow dataObject) {
+
+        RemoveFlowInputBuilder input = new RemoveFlowInputBuilder();
+        List<Instruction> inst = (dataObject).getInstructions().getInstruction();
+        input.setNode((dataObject).getNode());
+        input.setPriority((dataObject).getPriority());
+        input.setMatch((dataObject).getMatch());
+        input.setCookie((dataObject).getCookie());
+        input.setInstructions((dataObject).getInstructions());
+        // NOTE(review): the result of getLayer3Match() is discarded, and this
+        // line throws NPE when the flow has no match — confirm intent.
+        dataObject.getMatch().getLayer3Match();
+        // Debug output only; consider replacing with logger.trace.
+        for (int i = 0; i < inst.size(); i++) {
+            System.out.println("i = " + i + inst.get(i).getInstruction().toString());
+            System.out.println("i = " + i + inst.get(i).toString());
+        }
+
+        System.out.println("Instruction list" + (dataObject).getInstructions().getInstruction().toString());
+
+        // updating the staticflow cache
+        // NOTE(review): removal bumps the ordinal and *inserts* the flow into
+        // staticFlows, mirroring addFlow — confirm this is intended rather than
+        // removing the entry from the cache.
+        Integer ordinal = staticFlowsOrdinal.get(0);
+        staticFlowsOrdinal.put(0, ++ordinal);
+        staticFlows.put(ordinal, dataObject);
+
+        // We send the flow removal to the southbound plugin
+        // NOTE(review): (NodeFlow) cast assumes the generated Flow DataObject
+        // also implements NodeFlow — TODO confirm against the YANG bindings.
+        flowService.removeFlow(input.build());
+        updateLocalDatabase((NodeFlow) dataObject, false);
+    }
+
+ @SuppressWarnings("unchecked")
private void commitToPlugin(internalTransaction transaction) {
- for(Entry<InstanceIdentifier<?>, Flow> entry :transaction.additions.entrySet()) {
+ for (Entry<InstanceIdentifier<?>, Flow> entry : transaction.additions.entrySet()) {
System.out.println("Coming add cc in FlowDatacommitHandler");
- addFlow(entry.getKey(),entry.getValue());
+ addFlow(entry.getKey(), entry.getValue());
}
- for(@SuppressWarnings("unused") Entry<InstanceIdentifier<?>, Flow> entry :transaction.updates.entrySet()) {
+ for (@SuppressWarnings("unused")
+ Entry<InstanceIdentifier<?>, Flow> entry : transaction.updates.entrySet()) {
System.out.println("Coming update cc in FlowDatacommitHandler");
- // updateFlow(entry.getKey(),entry.getValue());
+ // updateFlow(entry.getKey(),entry.getValue());
}
-
- for(@SuppressWarnings("unused") InstanceIdentifier<?> removal : transaction.removals) {
- // removeFlow(removal);
+
+ for (Entry<InstanceIdentifier<?>, Flow> entry : transaction.removals.entrySet()) {
+ System.out.println("Coming remove cc in FlowDatacommitHandler");
+ removeFlow(entry.getKey(), entry.getValue());
}
+
}
-
+
private final class FlowDataCommitHandler implements DataCommitHandler<InstanceIdentifier<?>, DataObject> {
- @SuppressWarnings("unchecked")
- @Override
- public DataCommitTransaction requestCommit(DataModification<InstanceIdentifier<?>, DataObject> modification) {
- // We should verify transaction
- System.out.println("Coming in FlowDatacommitHandler");
- internalTransaction transaction = new internalTransaction(modification);
- transaction.prepareUpdate();
- return transaction;
- }
+ @SuppressWarnings("unchecked")
+ @Override
+ public DataCommitTransaction requestCommit(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+ // We should verify transaction
+ System.out.println("Coming in FlowDatacommitHandler");
+ internalTransaction transaction = new internalTransaction(modification);
+ transaction.prepareUpdate();
+ return transaction;
+ }
}
private final class internalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
Map<InstanceIdentifier<?>, Flow> additions = new HashMap<>();
Map<InstanceIdentifier<?>, Flow> updates = new HashMap<>();
- Set<InstanceIdentifier<?>> removals = new HashSet<>();
+ Map<InstanceIdentifier<?>, Flow> removals = new HashMap<>();
/**
* We create a plan which flows will be added, which will be updated and
* which will be removed based on our internal state.
- *
+ *
*/
        void prepareUpdate() {
            Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
            for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
+
+                // validating the DataObject
+
+                // FIXME(review): 'entry' is a Map.Entry, not a NodeFlow — this
+                // cast (and the FlowKey/NodeFlow casts below) will throw
+                // ClassCastException at runtime; the checks should operate on
+                // entry.getValue() / entry.getKey() instead.
+                Status status = validate(container, (NodeFlow) entry);
+                if (!status.isSuccess()) {
+                    logger.warn("Invalid Configuration for flow {}. The failure is {}", entry, status.getDescription());
+                    String error = "Invalid Configuration (" + status.getDescription() + ")";
+                    logger.error(error);
+                    return;
+                }
+                // Presence check
+                if (flowEntryExists((NodeFlow) entry)) {
+                    String error = "Entry with this name on specified table already exists";
+                    logger.warn("Entry with this name on specified table already exists: {}", entry);
+                    logger.error(error);
+                    return;
+                }
+                if (originalSwView.containsKey((FlowKey) entry)) {
+                    logger.warn("Operation Rejected: A flow with same match and priority exists on the target node");
+                    logger.trace("Aborting to install {}", entry);
+                    continue;
+                }
+                if (!FRMUtil.validateMatch((NodeFlow) entry)) {
+                    logger.error("Not a valid Match");
+                    return;
+                }
+                if (!FRMUtil.validateInstructions((NodeFlow) entry)) {
+                    logger.error("Not a valid Instruction");
+                    return;
+                }
                if (entry.getValue() instanceof Flow) {
                    Flow flow = (Flow) entry.getValue();
                    preparePutEntry(entry.getKey(), flow);
                }
-                removals = modification.getRemovedConfigurationData();
+                // removals = modification.getRemovedConfigurationData();
+                // NOTE(review): the removals processing below sits *inside* the
+                // puts loop, so it re-runs once per updated entry (and never runs
+                // when there are no puts) — it should be hoisted out of the loop.
+                Set<InstanceIdentifier<?>> removedData = modification.getRemovedConfigurationData();
+                for (InstanceIdentifier<?> removal : removedData) {
+                    // Resolve the removed path back to its original Flow so the
+                    // commit phase can build the RemoveFlowInput from it.
+                    DataObject value = modification.getOriginalConfigurationData().get(removal);
+                    if (value instanceof Flow) {
+                        removals.put(removal, (Flow) value);
+                    }
+                }
+
            }
private void preparePutEntry(InstanceIdentifier<?> key, Flow flow) {
/**
* We are OK to go with execution of plan
- *
+ *
*/
@Override
public RpcResult<Void> finish() throws IllegalStateException {
-
+
commitToPlugin(this);
// We return true if internal transaction is successful.
- // return Rpcs.getRpcResult(true, null, Collections.emptySet());
- return Rpcs.getRpcResult(true, null, null);
+ // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+ return Rpcs.getRpcResult(true, null, null);
}
/**
- *
+ *
* We should rollback our preparation
- *
+ *
*/
@Override
public RpcResult<Void> rollback() throws IllegalStateException {
// NOOP - we did not modified any internal state during
// requestCommit phase
- // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+ // return Rpcs.getRpcResult(true, null, Collections.emptySet());
return Rpcs.getRpcResult(true, null, null);
-
+
}
+        /**
+         * Validates a flow configuration against the container's switch inventory.
+         *
+         * @param container container the flow targets; null selects the default container
+         * @param dataObject the flow configuration to validate
+         * @return SUCCESS when name, node and priority are acceptable,
+         *         otherwise BADREQUEST with a description of the failure
+         */
+        public Status validate(IContainer container, NodeFlow dataObject) {
+
+            Switch sw = null;
+            String containerName = (container == null) ? GlobalConstants.DEFAULT.toString() : container.getName();
+            ISwitchManager switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class,
+                    containerName, this);
+            // Flow name must be present, non-blank and match the allowed pattern.
+            if (dataObject.getFlowName() == null || dataObject.getFlowName().trim().isEmpty()
+                    || !dataObject.getFlowName().matches(NAMEREGEX)) {
+                return new Status(StatusCode.BADREQUEST, "Invalid Flow name");
+            }
+            // Node Validation
+            if (dataObject.getNode() == null) {
+                return new Status(StatusCode.BADREQUEST, "Node is null");
+            }
+
+            // The target node must be one of the known network devices.
+            if (switchManager != null) {
+                for (Switch device : switchManager.getNetworkDevices()) {
+                    if (device.getNode().equals(dataObject.getNode())) {
+                        sw = device;
+                        break;
+                    }
+                }
+                if (sw == null) {
+                    // Bug fix: report the node that was requested; the old code
+                    // formatted a stale loop variable (the last device iterated).
+                    return new Status(StatusCode.BADREQUEST, String.format("Node %s not found",
+                            dataObject.getNode()));
+                }
+            } else {
+                logger.debug("switchmanager is not set yet");
+            }
+
+            // OpenFlow priority is an unsigned 16-bit value.
+            if (dataObject.getPriority() != null) {
+                if (dataObject.getPriority() < 0 || dataObject.getPriority() > 65535) {
+                    return new Status(StatusCode.BADREQUEST, String.format("priority %s is not in the range 0 - 65535",
+                            dataObject.getPriority()));
+                }
+            }
+
+            return new Status(StatusCode.SUCCESS);
+        }
+
+        /**
+         * Returns true when a configured flow with the same name already exists
+         * on the same table — flow names must be unique per table id.
+         */
+        private boolean flowEntryExists(NodeFlow config) {
+            for (Flow existing : originalSwView.values()) {
+                boolean sameName = existing.getFlowName().equals(config.getFlowName());
+                boolean sameTable = existing.getTableId().equals(config.getTableId());
+                if (sameName && sameTable) {
+                    return true;
+                }
+            }
+            return false;
+        }
}
-
- final class FlowEventListener implements SalFlowListener {
-
+
+ final class FlowEventListener implements SalFlowListener {
+
List<FlowAdded> addedFlows = new ArrayList<>();
List<FlowRemoved> removedFlows = new ArrayList<>();
List<FlowUpdated> updatedFlows = new ArrayList<>();
-
+
@Override
public void onFlowAdded(FlowAdded notification) {
- System.out.println("added flow..........................");
- addedFlows.add(notification);
- }
-
- @Override
- public void onFlowRemoved(FlowRemoved notification) {
- removedFlows.add(notification);
- };
-
- @Override
- public void onFlowUpdated(FlowUpdated notification) {
- updatedFlows.add(notification);
- }
-
- }
-
- final class FlowDataListener implements DataChangeListener {
- private SalFlowService flowService;
-
- public FlowDataListener() {
-
- }
-
- @Override
- public void onDataChanged(
- DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- System.out.println("Coming in onDataChange..............");
- @SuppressWarnings("unchecked")
- Collection<DataObject> additions = (Collection<DataObject>) change.getCreatedConfigurationData();
- // we can check for getCreated, getDeleted or getUpdated from DataChange Event class
- for (DataObject dataObject : additions) {
- if (dataObject instanceof NodeFlow) {
- NodeRef nodeOne = createNodeRef("foo:node:1");
- // validating the dataObject here
- AddFlowInputBuilder input = new AddFlowInputBuilder();
- input.setNode(((NodeFlow) dataObject).getNode());
- input.setNode(nodeOne);
- // input.setPriority(((NodeFlow) dataObject).getPriority());
- //input.setMatch(((NodeFlow) dataObject).getMatch());
- //input.setFlowTable(((NodeFlow) dataObject).getFlowTable());
- //input.setCookie(((NodeFlow) dataObject).getCookie());
- //input.setAction(((NodeFlow) dataObject).getAction());
-
- @SuppressWarnings("unused")
- Future<RpcResult<java.lang.Void>> result = flowService.addFlow(input.build());
- }
- }
- }
- }
-
-
-
+ System.out.println("added flow..........................");
+ addedFlows.add(notification);
+ }
+
+ @Override
+ public void onFlowRemoved(FlowRemoved notification) {
+ removedFlows.add(notification);
+ };
+
+ @Override
+ public void onFlowUpdated(FlowUpdated notification) {
+ updatedFlows.add(notification);
+ }
+
+ }
+
+ // Commented out DataChangeListene - to be used by Stats
+
+ // final class FlowDataListener implements DataChangeListener {
+ // private SalFlowService flowService;
+ //
+ // public FlowDataListener() {
+ //
+ // }
+ //
+ // @Override
+ // public void onDataChanged(
+ // DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ // System.out.println("Coming in onDataChange..............");
+ // @SuppressWarnings("unchecked")
+ // Collection<DataObject> additions = (Collection<DataObject>)
+ // change.getCreatedConfigurationData();
+ // // we can check for getCreated, getDeleted or getUpdated from DataChange
+ // Event class
+ // for (DataObject dataObject : additions) {
+ // if (dataObject instanceof NodeFlow) {
+ // NodeRef nodeOne = createNodeRef("foo:node:1");
+ // // validating the dataObject here
+ // AddFlowInputBuilder input = new AddFlowInputBuilder();
+ // input.setNode(((NodeFlow) dataObject).getNode());
+ // input.setNode(nodeOne);
+ // // input.setPriority(((NodeFlow) dataObject).getPriority());
+ // //input.setMatch(((NodeFlow) dataObject).getMatch());
+ // //input.setFlowTable(((NodeFlow) dataObject).getFlowTable());
+ // //input.setCookie(((NodeFlow) dataObject).getCookie());
+ // //input.setAction(((NodeFlow) dataObject).getAction());
+ //
+ // @SuppressWarnings("unused")
+ // Future<RpcResult<java.lang.Void>> result =
+ // flowService.addFlow(input.build());
+ // }
+ // }
+ // }
+ // }
+
+    /*
+     * Applies one flow change (add when 'add' is true, remove otherwise) to both
+     * in-memory views: the software-view caches and the per-node flow index.
+     */
+    private static void updateLocalDatabase(NodeFlow entry, boolean add) {
+
+        updateSwViewes(entry, add);
+
+        updateNodeFlowsDB(entry, add);
+
+    }
+
+ /*
+ * Update the node mapped flows database
+ */
+    /*
+     * Update the software-view caches (configured and installed flows).
+     * The old header comment was a copy-paste of updateNodeFlowsDB's.
+     */
+    private static void updateSwViewes(NodeFlow entry, boolean add) {
+        // NOTE(review): casting the same NodeFlow to both FlowKey and Flow
+        // assumes the generated DataObject implements both — TODO confirm;
+        // ideally the key should be derived via the flow's getKey().
+        if (add) {
+            FlowConsumerImpl.originalSwView.put((FlowKey) entry, (Flow) entry);
+            installedSwView.put((FlowKey) entry, (Flow) entry);
+        } else {
+            // Consistency fix: remove by the FlowKey the entry was stored under,
+            // matching the put path and the installedSwView removal below.
+            originalSwView.remove((FlowKey) entry);
+            installedSwView.remove((FlowKey) entry);
+
+        }
+    }
+
+ /*
+ * Update the node mapped flows database
+ */
+    /*
+     * Update the node mapped flows database
+     */
+    private static void updateNodeFlowsDB(NodeFlow entry, boolean add) {
+        Node node = (Node) entry.getNode();
+
+        List<Flow> flowsForNode = nodeFlows.get(node);
+        if (flowsForNode == null) {
+            if (!add) {
+                // Nothing indexed for this node, so there is nothing to remove.
+                return;
+            }
+            flowsForNode = new ArrayList<Flow>();
+        }
+
+        if (add) {
+            flowsForNode.add((Flow) entry);
+        } else {
+            flowsForNode.remove((Flow) entry);
+        }
+
+        // Write the index back so the change replicates across the cluster;
+        // drop the node's entry entirely once it has no flows left.
+        if (flowsForNode.isEmpty()) {
+            nodeFlows.remove(node);
+        } else {
+            nodeFlows.put(node, flowsForNode);
+        }
+    }
+
private static NodeRef createNodeRef(String string) {
NodeKey key = new NodeKey(new NodeId(string));
InstanceIdentifier<Node> path = InstanceIdentifier.builder().node(Nodes.class).node(Node.class, key)
return new NodeRef(path);
}
-
-
-
}
import org.opendaylight.controller.sal.utils.GlobalConstants;
import org.opendaylight.controller.sal.utils.Status;
import org.opendaylight.controller.sal.utils.StatusCode;
-import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.Groups;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.groups.Group;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.config.rev131024.groups.GroupKey;
@SuppressWarnings("unused")
public class GroupConsumerImpl {
-
+
protected static final Logger logger = LoggerFactory.getLogger(GroupConsumerImpl.class);
private GroupEventListener groupEventListener = new GroupEventListener();
private Registration<NotificationListener> groupListener;
- private SalGroupService groupService;
+ private SalGroupService groupService;
private GroupDataCommitHandler commitHandler;
-
+
private ConcurrentMap<GroupKey, Group> originalSwGroupView;
private ConcurrentMap<GroupKey, Group> installedSwGroupView;
-
+
private ConcurrentMap<Node, List<Group>> nodeGroups;
private ConcurrentMap<GroupKey, Group> inactiveGroups;
-
- private IClusterContainerServices clusterGroupContainerService = null;
+
+ private IClusterContainerServices clusterGroupContainerService = null;
private IContainer container;
-
+
public GroupConsumerImpl() {
- InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Groups.class).node(Group.class).toInstance();
+ InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Groups.class)
+ .node(Group.class).toInstance();
groupService = FRMConsumerImpl.getProviderSession().getRpcService(SalGroupService.class);
-
- clusterGroupContainerService = FRMConsumerImpl.getClusterContainerService();
+
+ clusterGroupContainerService = FRMConsumerImpl.getClusterContainerService();
container = FRMConsumerImpl.getContainer();
-
+
if (!(cacheStartup())) {
logger.error("Unanle to allocate/retrieve group cache");
System.out.println("Unable to allocate/retrieve group cache");
}
-
+
if (null == groupService) {
logger.error("Consumer SAL Group Service is down or NULL. FRM may not function as intended");
System.out.println("Consumer SAL Group Service is down or NULL.");
return;
- }
-
+ }
+
// For switch events
groupListener = FRMConsumerImpl.getNotificationService().registerNotificationListener(groupEventListener);
-
+
if (null == groupListener) {
logger.error("Listener to listen on group data modifcation events");
System.out.println("Listener to listen on group data modifcation events.");
return;
- }
-
+ }
+
commitHandler = new GroupDataCommitHandler();
FRMConsumerImpl.getDataProviderService().registerCommitHandler(path, commitHandler);
- }
-
+ }
+
private boolean allocateGroupCaches() {
if (this.clusterGroupContainerService == null) {
logger.warn("Group: Un-initialized clusterGroupContainerService, can't create cache");
return false;
- }
+ }
try {
clusterGroupContainerService.createCache("frm.originalSwGroupView",
clusterGroupContainerService.createCache("frm.nodeGroups",
EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
-
-//TODO for cluster mode
- /* clusterGroupContainerService.createCache(WORK_STATUS_CACHE,
- EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL, IClusterServices.cacheMode.ASYNC));
-
- clusterGroupContainerService.createCache(WORK_ORDER_CACHE,
- EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL, IClusterServices.cacheMode.ASYNC));*/
-
- } catch (CacheConfigException cce) {
+
+ // TODO for cluster mode
+ /*
+ * clusterGroupContainerService.createCache(WORK_STATUS_CACHE,
+ * EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL,
+ * IClusterServices.cacheMode.ASYNC));
+ *
+ * clusterGroupContainerService.createCache(WORK_ORDER_CACHE,
+ * EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL,
+ * IClusterServices.cacheMode.ASYNC));
+ */
+
+ } catch (CacheConfigException cce) {
logger.error("Group CacheConfigException");
return false;
-
+
} catch (CacheExistException cce) {
- logger.error(" Group CacheExistException");
+ logger.error(" Group CacheExistException");
}
-
+
return true;
}
-
+
private void nonClusterGroupObjectCreate() {
originalSwGroupView = new ConcurrentHashMap<GroupKey, Group>();
installedSwGroupView = new ConcurrentHashMap<GroupKey, Group>();
- nodeGroups = new ConcurrentHashMap<Node, List<Group>>();
+ nodeGroups = new ConcurrentHashMap<Node, List<Group>>();
inactiveGroups = new ConcurrentHashMap<GroupKey, Group>();
}
-
+
@SuppressWarnings({ "unchecked" })
private boolean retrieveGroupCaches() {
ConcurrentMap<?, ?> map;
logger.warn("Group: un-initialized clusterGroupContainerService, can't retrieve cache");
nonClusterGroupObjectCreate();
return false;
- }
+ }
map = clusterGroupContainerService.getCache("frm.originalSwGroupView");
if (map != null) {
logger.error("Retrieval of cache(nodeGroup) failed");
return false;
}
-
+
return true;
}
-
+
private boolean cacheStartup() {
if (allocateGroupCaches()) {
if (retrieveGroupCaches()) {
return true;
}
}
-
+
return false;
}
-
+
public Status validateGroup(Group group, FRMUtil.operation operation) {
String containerName;
String groupName;
Iterator<Bucket> bucketIterator;
boolean returnResult;
Buckets groupBuckets;
-
+
if (null != group) {
containerName = group.getContainerName();
-
+
if (null == containerName) {
containerName = GlobalConstants.DEFAULT.toString();
- }
- else if (!FRMUtil.isNameValid(containerName)) {
+ } else if (!FRMUtil.isNameValid(containerName)) {
logger.error("Container Name is invalid %s" + containerName);
return new Status(StatusCode.BADREQUEST, "Container Name is invalid");
}
-
+
groupName = group.getGroupName();
if (!FRMUtil.isNameValid(groupName)) {
logger.error("Group Name is invalid %s" + groupName);
return new Status(StatusCode.BADREQUEST, "Group Name is invalid");
}
-
+
returnResult = doesGroupEntryExists(group.getKey(), groupName, containerName);
-
+
if (FRMUtil.operation.ADD == operation && returnResult) {
logger.error("Record with same Group Name exists");
return new Status(StatusCode.BADREQUEST, "Group record exists");
- }
- else if (!returnResult) {
+ } else if (!returnResult) {
logger.error("Group record does not exist");
return new Status(StatusCode.BADREQUEST, "Group record does not exist");
}
-
- if (!(group.getGroupType().getIntValue() >= GroupType.GroupAll.getIntValue() &&
- group.getGroupType().getIntValue() <= GroupType.GroupFf.getIntValue())) {
+
+ if (!(group.getGroupType().getIntValue() >= GroupType.GroupAll.getIntValue() && group.getGroupType()
+ .getIntValue() <= GroupType.GroupFf.getIntValue())) {
logger.error("Invalid Group type %d" + group.getGroupType().getIntValue());
- return new Status(StatusCode.BADREQUEST, "Invalid Group type");
+ return new Status(StatusCode.BADREQUEST, "Invalid Group type");
}
-
+
groupBuckets = group.getBuckets();
-
+
if (null != groupBuckets && null != groupBuckets.getBucket()) {
bucketIterator = groupBuckets.getBucket().iterator();
-
+
while (bucketIterator.hasNext()) {
- if(!(FRMUtil.areActionsValid(bucketIterator.next().getActions()))) {
+ if (!(FRMUtil.validateActions(bucketIterator.next().getAction()))) {
logger.error("Error in action bucket");
return new Status(StatusCode.BADREQUEST, "Invalid Group bucket contents");
- }
+ }
}
- }
+ }
}
-
+
return new Status(StatusCode.SUCCESS);
-
+
}
-
+
private boolean doesGroupEntryExists(GroupKey key, String groupName, String containerName) {
- if (! originalSwGroupView.containsKey(key)) {
+ if (!originalSwGroupView.containsKey(key)) {
return false;
}
-
+
for (ConcurrentMap.Entry<GroupKey, Group> entry : originalSwGroupView.entrySet()) {
if (entry.getValue().getGroupName().equals(groupName)) {
if (entry.getValue().getContainerName().equals(containerName)) {
return false;
}
-
/**
- * Update Group entries to the southbound plugin/inventory and our internal database
+ * Update Group entries to the southbound plugin/inventory and our internal
+ * database
*
* @param path
* @param dataObject
*/
private Status updateGroup(InstanceIdentifier<?> path, Group groupUpdateDataObject) {
- GroupKey groupKey = groupUpdateDataObject.getKey();
+ GroupKey groupKey = groupUpdateDataObject.getKey();
Status groupOperationStatus = validateGroup(groupUpdateDataObject, FRMUtil.operation.UPDATE);
-
+
if (!groupOperationStatus.isSuccess()) {
logger.error("Group data object validation failed %s" + groupUpdateDataObject.getGroupName());
return groupOperationStatus;
}
-
+
originalSwGroupView.remove(groupKey);
originalSwGroupView.put(groupKey, groupUpdateDataObject);
-
+
if (groupUpdateDataObject.isInstall()) {
UpdateGroupInputBuilder groupData = new UpdateGroupInputBuilder();
- //TODO how to get original group and modified group.
-
+ // TODO how to get original group and modified group.
+
if (installedSwGroupView.containsKey(groupKey)) {
installedSwGroupView.remove(groupKey);
}
-
+
installedSwGroupView.put(groupKey, groupUpdateDataObject);
groupService.updateGroup(groupData.build());
}
-
+
return groupOperationStatus;
}
-
+
/**
* Adds Group to the southbound plugin and our internal database
*
private Status addGroup(InstanceIdentifier<?> path, Group groupAddDataObject) {
GroupKey groupKey = groupAddDataObject.getKey();
Status groupOperationStatus = validateGroup(groupAddDataObject, FRMUtil.operation.ADD);
-
+
if (!groupOperationStatus.isSuccess()) {
logger.error("Group data object validation failed %s" + groupAddDataObject.getGroupName());
return groupOperationStatus;
}
-
+
originalSwGroupView.put(groupKey, groupAddDataObject);
-
+
if (groupAddDataObject.isInstall()) {
AddGroupInputBuilder groupData = new AddGroupInputBuilder();
groupData.setBuckets(groupAddDataObject.getBuckets());
groupData.setContainerName(groupAddDataObject.getContainerName());
groupData.setGroupId(groupAddDataObject.getGroupId());
groupData.setGroupType(groupAddDataObject.getGroupType());
- groupData.setNode(groupAddDataObject.getNode());
+ groupData.setNode(groupAddDataObject.getNode());
installedSwGroupView.put(groupKey, groupAddDataObject);
groupService.addGroup(groupData.build());
}
-
+
return groupOperationStatus;
}
-
-    private RpcResult<Void> commitToPlugin(internalTransaction transaction) {
-        for(Entry<InstanceIdentifier<?>, Group> entry :transaction.additions.entrySet()) {
-
-            if (!addGroup(entry.getKey(),entry.getValue()).isSuccess()) {
+
+    /**
+     * Pushes a prepared transaction to the SAL group service: first the new
+     * groups, then the modified ones. Fails fast on the first unsuccessful call.
+     */
+    private RpcResult<Void> commitToPlugin(internalTransaction transaction) {
+        for (Entry<InstanceIdentifier<?>, Group> entry : transaction.additions.entrySet()) {
+
+            if (!addGroup(entry.getKey(), entry.getValue()).isSuccess()) {
                return Rpcs.getRpcResult(false, null, null);
            }
        }
-        for(@SuppressWarnings("unused") Entry<InstanceIdentifier<?>, Group> entry :transaction.additions.entrySet()) {
-
-            if (!updateGroup(entry.getKey(),entry.getValue()).isSuccess()) {
+        // Bug fix: this loop must walk 'updates'; it previously re-walked
+        // 'additions', so pending group updates were never sent. The entry is
+        // clearly used, so @SuppressWarnings("unused") was wrong as well.
+        for (Entry<InstanceIdentifier<?>, Group> entry : transaction.updates.entrySet()) {
+
+            if (!updateGroup(entry.getKey(), entry.getValue()).isSuccess()) {
                return Rpcs.getRpcResult(false, null, null);
            }
        }
-
-        for(InstanceIdentifier<?> removal : transaction.removals) {
-            // removeFlow(removal);
+
+        // TODO(review): group removal is still unimplemented.
+        for (InstanceIdentifier<?> removal : transaction.removals) {
+            // removeGroup(removal);
        }
-
+
        return Rpcs.getRpcResult(true, null, null);
    }
-
+
private final class GroupDataCommitHandler implements DataCommitHandler<InstanceIdentifier<?>, DataObject> {
- @SuppressWarnings("unchecked")
+ @SuppressWarnings("unchecked")
@Override
- public DataCommitTransaction requestCommit(DataModification<InstanceIdentifier<?>, DataObject> modification) {
- // We should verify transaction
- System.out.println("Coming in GroupDatacommitHandler");
- internalTransaction transaction = new internalTransaction(modification);
- transaction.prepareUpdate();
- return transaction;
- }
+ public DataCommitTransaction<InstanceIdentifier<?>, DataObject> requestCommit(DataModification<InstanceIdentifier<?>, DataObject> modification) {
+ // We should verify transaction
+ System.out.println("Coming in GroupDatacommitHandler");
+ internalTransaction transaction = new internalTransaction(modification);
+ transaction.prepareUpdate();
+ return transaction;
+ }
}
private final class internalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
/**
* We create a plan which flows will be added, which will be updated and
* which will be removed based on our internal state.
- *
+ *
*/
void prepareUpdate() {
Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
- if (entry.getValue() instanceof Group) {
- Group group = (Group) entry.getValue();
+ if (entry.getValue() instanceof Group) {
+ Group group = (Group) entry.getValue();
preparePutEntry(entry.getKey(), group);
}
}
private void preparePutEntry(InstanceIdentifier<?> key, Group group) {
-
+
Group original = originalSwGroupView.get(key);
if (original != null) {
// It is update for us
-
- updates.put(key, group);
+
+ updates.put(key, group);
} else {
// It is addition for us
-
+
additions.put(key, group);
}
}
/**
* We are OK to go with execution of plan
- *
+ *
*/
@Override
public RpcResult<Void> finish() throws IllegalStateException {
-
+
RpcResult<Void> rpcStatus = commitToPlugin(this);
// We return true if internal transaction is successful.
- // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+ // return Rpcs.getRpcResult(true, null, Collections.emptySet());
return rpcStatus;
}
/**
- *
+ *
* We should rollback our preparation
- *
+ *
*/
@Override
public RpcResult<Void> rollback() throws IllegalStateException {
// NOOP - we did not modified any internal state during
// requestCommit phase
- // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+ // return Rpcs.getRpcResult(true, null, Collections.emptySet());
return Rpcs.getRpcResult(true, null, null);
-
+
}
-
+
}
-
-
-    final class GroupEventListener implements SalGroupListener {
-
+
+    /**
+     * Collects SAL group notifications from the southbound plugin.
+     * Added groups are recorded; remove/update handlers are still stubs.
+     */
+    final class GroupEventListener implements SalGroupListener {
+
        List<GroupAdded> addedGroups = new ArrayList<>();
        List<GroupRemoved> removedGroups = new ArrayList<>();
        List<GroupUpdated> updatedGroups = new ArrayList<>();
-
        @Override
        public void onGroupAdded(GroupAdded notification) {
            System.out.println("added Group..........................");
-            addedGroups.add(notification);
+            addedGroups.add(notification);
        }
        @Override
        public void onGroupRemoved(GroupRemoved notification) {
            // TODO Auto-generated method stub
-
+            // NOTE(review): removal notifications are dropped — removedGroups is
+            // never populated; implement or document why this is a no-op.
+
        }
        @Override
        public void onGroupUpdated(GroupUpdated notification) {
            // TODO Auto-generated method stub
-
-        }
+            // NOTE(review): update notifications are dropped — updatedGroups is
+            // never populated; implement or document why this is a no-op.
+
+        }
    }
}
import org.opendaylight.controller.sal.utils.GlobalConstants;
import org.opendaylight.controller.sal.utils.Status;
import org.opendaylight.controller.sal.utils.StatusCode;
-import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.config.rev131024.Meters;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.config.rev131024.meters.MeterKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInputBuilder;
protected static final Logger logger = LoggerFactory.getLogger(MeterConsumerImpl.class);
private MeterEventListener meterEventListener = new MeterEventListener();
private Registration<NotificationListener> meterListener;
- private SalMeterService meterService;
+ private SalMeterService meterService;
private MeterDataCommitHandler commitHandler;
-
+
private ConcurrentMap<MeterKey, Meter> originalSwMeterView;
private ConcurrentMap<MeterKey, Meter> installedSwMeterView;
-
+
private ConcurrentMap<Node, List<Meter>> nodeMeters;
private ConcurrentMap<MeterKey, Meter> inactiveMeters;
-
- private IClusterContainerServices clusterMeterContainerService = null;
+
+ private IClusterContainerServices clusterMeterContainerService = null;
private IContainer container;
-
+
/**
 * Wires the meter consumer: resolves the SAL meter RPC service, the
 * cluster container service and the container, initializes the meter
 * caches, registers for meter notifications, and installs the commit
 * handler for the {@code Meters/Meter} config subtree.
 *
 * Fix: the listener-registration failure message previously read
 * "Listener to listen on meter data modifcation events" — it did not
 * describe the failure and contained a typo.
 */
public MeterConsumerImpl() {
    InstanceIdentifier<? extends DataObject> path = InstanceIdentifier.builder().node(Meters.class)
            .node(Meter.class).toInstance();
    meterService = FRMConsumerImpl.getProviderSession().getRpcService(SalMeterService.class);
    clusterMeterContainerService = FRMConsumerImpl.getClusterContainerService();

    container = FRMConsumerImpl.getContainer();

    // Cache failure is logged but deliberately non-fatal here.
    if (!cacheStartup()) {
        logger.error("Unable to allocate/retrieve meter cache");
        System.out.println("Unable to allocate/retrieve meter cache");
    }

    if (null == meterService) {
        logger.error("Consumer SAL Meter Service is down or NULL. FRM may not function as intended");
        System.out.println("Consumer SAL Meter Service is down or NULL.");
        return;
    }

    // For switch/plugin events
    meterListener = FRMConsumerImpl.getNotificationService().registerNotificationListener(meterEventListener);

    if (null == meterListener) {
        logger.error("Failed to register listener for meter data modification events");
        System.out.println("Failed to register listener for meter data modification events.");
        return;
    }

    commitHandler = new MeterDataCommitHandler();
    FRMConsumerImpl.getDataProviderService().registerCommitHandler(path, commitHandler);
}
-
-
-
+
private boolean allocateMeterCaches() {
if (this.clusterMeterContainerService == null) {
logger.warn("Meter: Un-initialized clusterMeterContainerService, can't create cache");
return false;
- }
+ }
try {
clusterMeterContainerService.createCache("frm.originalSwMeterView",
clusterMeterContainerService.createCache("frm.nodeMeters",
EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
-
-//TODO for cluster mode
- /* clusterMeterContainerService.createCache(WORK_STATUS_CACHE,
- EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL, IClusterServices.cacheMode.ASYNC));
-
- clusterMeterContainerService.createCache(WORK_ORDER_CACHE,
- EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL, IClusterServices.cacheMode.ASYNC));*/
-
- } catch (CacheConfigException cce) {
+
+ // TODO for cluster mode
+ /*
+ * clusterMeterContainerService.createCache(WORK_STATUS_CACHE,
+ * EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL,
+ * IClusterServices.cacheMode.ASYNC));
+ *
+ * clusterMeterContainerService.createCache(WORK_ORDER_CACHE,
+ * EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL,
+ * IClusterServices.cacheMode.ASYNC));
+ */
+
+ } catch (CacheConfigException cce) {
logger.error("Meter CacheConfigException");
return false;
-
+
} catch (CacheExistException cce) {
- logger.error(" Meter CacheExistException");
+ logger.error(" Meter CacheExistException");
}
-
+
return true;
}
-
+
/**
 * Fallback initialization used when clustering is unavailable: backs
 * every meter view with a plain (non-clustered) ConcurrentHashMap.
 */
private void nonClusterMeterObjectCreate() {
    originalSwMeterView = new ConcurrentHashMap<>();
    installedSwMeterView = new ConcurrentHashMap<>();
    nodeMeters = new ConcurrentHashMap<>();
    inactiveMeters = new ConcurrentHashMap<>();
}
-
+
@SuppressWarnings({ "unchecked" })
private boolean retrieveMeterCaches() {
ConcurrentMap<?, ?> map;
logger.warn("Meter: un-initialized clusterMeterContainerService, can't retrieve cache");
nonClusterMeterObjectCreate();
return false;
- }
+ }
map = clusterMeterContainerService.getCache("frm.originalSwMeterView");
if (map != null) {
logger.error("Retrieval of cache(nodeMeter) failed");
return false;
}
-
+
return true;
}
-
+
private boolean cacheStartup() {
if (allocateMeterCaches()) {
if (retrieveMeterCaches()) {
return true;
}
}
-
+
return false;
}
-
+
/**
* Adds Meter to the southbound plugin and our internal database
*
* @param path
* @param dataObject
*/
- private Status addMeter(InstanceIdentifier<?> path, Meter meterAddDataObject) {
+ private Status addMeter(InstanceIdentifier<?> path, Meter meterAddDataObject) {
MeterKey meterKey = meterAddDataObject.getKey();
-
- if (null != meterKey &&
- validateMeter(meterAddDataObject, FRMUtil.operation.ADD).isSuccess()) {
+
+ if (null != meterKey && validateMeter(meterAddDataObject, FRMUtil.operation.ADD).isSuccess()) {
if (meterAddDataObject.isInstall()) {
AddMeterInputBuilder meterBuilder = new AddMeterInputBuilder();
-
+
meterBuilder.setContainerName(meterAddDataObject.getContainerName());
meterBuilder.setFlags(meterAddDataObject.getFlags());
meterBuilder.setMeterBandHeaders(meterAddDataObject.getMeterBandHeaders());
originalSwMeterView.put(meterKey, meterAddDataObject);
meterService.addMeter(meterBuilder.build());
}
-
- originalSwMeterView.put(meterKey, meterAddDataObject);
- }
- else {
+
+ originalSwMeterView.put(meterKey, meterAddDataObject);
+ } else {
return new Status(StatusCode.BADREQUEST, "Meter Key or attribute validation failed");
}
-
+
return new Status(StatusCode.SUCCESS);
}
-
+
/*
* Update Meter to the southbound plugin and our internal database
*
* @param path
+ *
* @param dataObject
*/
- private Status updateMeter(InstanceIdentifier<?> path, Meter meterUpdateDataObject) {
+ private Status updateMeter(InstanceIdentifier<?> path, Meter meterUpdateDataObject) {
MeterKey meterKey = meterUpdateDataObject.getKey();
-
- if (null != meterKey &&
- validateMeter(meterUpdateDataObject, FRMUtil.operation.ADD).isSuccess()) {
+
+ if (null != meterKey && validateMeter(meterUpdateDataObject, FRMUtil.operation.ADD).isSuccess()) {
if (meterUpdateDataObject.isInstall()) {
- UpdateMeterInputBuilder updateMeterBuilder = new UpdateMeterInputBuilder();
-
+ UpdateMeterInputBuilder updateMeterBuilder = new UpdateMeterInputBuilder();
+
originalSwMeterView.put(meterKey, meterUpdateDataObject);
meterService.updateMeter(updateMeterBuilder.build());
}
-
- originalSwMeterView.put(meterKey, meterUpdateDataObject);
- }
- else {
+
+ originalSwMeterView.put(meterKey, meterUpdateDataObject);
+ } else {
return new Status(StatusCode.BADREQUEST, "Meter Key or attribute validation failed");
}
-
+
return new Status(StatusCode.SUCCESS);
}
-
+
/*
* Remove Meter to the southbound plugin and our internal database
*
* @param path
+ *
* @param dataObject
*/
- private Status RemoveMeter(InstanceIdentifier<?> path, Meter meterUpdateDataObject) {
+ private Status RemoveMeter(InstanceIdentifier<?> path, Meter meterUpdateDataObject) {
MeterKey meterKey = meterUpdateDataObject.getKey();
-
- if (null != meterKey &&
- validateMeter(meterUpdateDataObject, FRMUtil.operation.ADD).isSuccess()) {
+
+ if (null != meterKey && validateMeter(meterUpdateDataObject, FRMUtil.operation.ADD).isSuccess()) {
if (meterUpdateDataObject.isInstall()) {
- UpdateMeterInputBuilder updateMeterBuilder = new UpdateMeterInputBuilder();
-
+ UpdateMeterInputBuilder updateMeterBuilder = new UpdateMeterInputBuilder();
+
originalSwMeterView.put(meterKey, meterUpdateDataObject);
meterService.updateMeter(updateMeterBuilder.build());
}
-
- originalSwMeterView.put(meterKey, meterUpdateDataObject);
- }
- else {
+
+ originalSwMeterView.put(meterKey, meterUpdateDataObject);
+ } else {
return new Status(StatusCode.BADREQUEST, "Meter Key or attribute validation failed");
}
-
+
return new Status(StatusCode.SUCCESS);
}
-
+
public Status validateMeter(Meter meter, FRMUtil.operation operation) {
String containerName;
String meterName;
Status returnStatus = null;
boolean returnResult;
-
+
if (null != meter) {
containerName = meter.getContainerName();
-
+
if (null == containerName) {
containerName = GlobalConstants.DEFAULT.toString();
- }
- else if (!FRMUtil.isNameValid(containerName)) {
+ } else if (!FRMUtil.isNameValid(containerName)) {
logger.error("Container Name is invalid %s" + containerName);
returnStatus = new Status(StatusCode.BADREQUEST, "Container Name is invalid");
return returnStatus;
}
-
+
meterName = meter.getMeterName();
if (!FRMUtil.isNameValid(meterName)) {
logger.error("Meter Name is invalid %s" + meterName);
returnStatus = new Status(StatusCode.BADREQUEST, "Meter Name is invalid");
return returnStatus;
}
-
+
returnResult = doesMeterEntryExists(meter.getKey(), meterName, containerName);
-
+
if (FRMUtil.operation.ADD == operation && returnResult) {
logger.error("Record with same Meter Name exists");
returnStatus = new Status(StatusCode.BADREQUEST, "Meter record exists");
return returnStatus;
- }
- else if (!returnResult) {
+ } else if (!returnResult) {
logger.error("Group record does not exist");
returnStatus = new Status(StatusCode.BADREQUEST, "Meter record does not exist");
return returnStatus;
}
-
+
for (int i = 0; i < meter.getMeterBandHeaders().getMeterBandHeader().size(); i++) {
if (!meter.getFlags().isMeterBurst()) {
if (0 < meter.getMeterBandHeaders().getMeterBandHeader().get(i).getBurstSize()) {
logger.error("Burst size should only be associated when Burst FLAG is set");
- returnStatus = new Status(StatusCode.BADREQUEST, "Burst size should only be associated when Burst FLAG is set");
+ returnStatus = new Status(StatusCode.BADREQUEST,
+ "Burst size should only be associated when Burst FLAG is set");
break;
}
}
}
-
+
if (null != returnStatus && !returnStatus.isSuccess()) {
return returnStatus;
- }
- else {
+ } else {
BandType setBandType = null;
DscpRemark dscpRemark = null;
for (int i = 0; i < meter.getMeterBandHeaders().getMeterBandHeader().size(); i++) {
setBandType = meter.getMeterBandHeaders().getMeterBandHeader().get(i).getBandType();
- if ( setBandType instanceof DscpRemark) {
- dscpRemark = (DscpRemark)setBandType;
+ if (setBandType instanceof DscpRemark) {
+ dscpRemark = (DscpRemark) setBandType;
if (0 > dscpRemark.getRate()) {
-
+
}
- }
- else if (setBandType instanceof Drop) {
+ } else if (setBandType instanceof Drop) {
if (0 < dscpRemark.getPercLevel()) {
logger.error("Number of drop Precedence level");
- }
- }
- else if (setBandType instanceof Experimenter) {
-
+ }
+ } else if (setBandType instanceof Experimenter) {
+
}
- }
+ }
}
}
return new Status(StatusCode.SUCCESS);
}
-
+
private boolean doesMeterEntryExists(MeterKey key, String meterName, String containerName) {
- if (! originalSwMeterView.containsKey(key)) {
+ if (!originalSwMeterView.containsKey(key)) {
return false;
}
-
+
for (Entry<MeterKey, Meter> entry : originalSwMeterView.entrySet()) {
if (entry.getValue().getMeterName().equals(meterName)) {
if (entry.getValue().getContainerName().equals(containerName)) {
return false;
}
-
private RpcResult<Void> commitToPlugin(internalTransaction transaction) {
- for(Entry<InstanceIdentifier<?>, Meter> entry :transaction.additions.entrySet()) {
-
- if (!addMeter(entry.getKey(),entry.getValue()).isSuccess()) {
+ for (Entry<InstanceIdentifier<?>, Meter> entry : transaction.additions.entrySet()) {
+
+ if (!addMeter(entry.getKey(), entry.getValue()).isSuccess()) {
return Rpcs.getRpcResult(false, null, null);
}
}
- for(@SuppressWarnings("unused") Entry<InstanceIdentifier<?>, Meter> entry :transaction.updates.entrySet()) {
-
- if (!updateMeter(entry.getKey(),entry.getValue()).isSuccess()) {
+ for (@SuppressWarnings("unused")
+ Entry<InstanceIdentifier<?>, Meter> entry : transaction.updates.entrySet()) {
+
+ if (!updateMeter(entry.getKey(), entry.getValue()).isSuccess()) {
return Rpcs.getRpcResult(false, null, null);
}
}
-
- for(InstanceIdentifier<?> removal : transaction.removals) {
- /* if (!removeMeter(entry.getKey(),entry.getValue()).isSuccess()) {
- return Rpcs.getRpcResult(false, null, null);
- }*/
+
+ for (InstanceIdentifier<?> removal : transaction.removals) {
+ /*
+ * if (!removeMeter(entry.getKey(),entry.getValue()).isSuccess()) {
+ * return Rpcs.getRpcResult(false, null, null); }
+ */
}
-
+
return Rpcs.getRpcResult(true, null, null);
}
-
+
private final class internalTransaction implements DataCommitTransaction<InstanceIdentifier<?>, DataObject> {
private final DataModification<InstanceIdentifier<?>, DataObject> modification;
/**
* We create a plan which flows will be added, which will be updated and
* which will be removed based on our internal state.
- *
+ *
*/
void prepareUpdate() {
Set<Entry<InstanceIdentifier<?>, DataObject>> puts = modification.getUpdatedConfigurationData().entrySet();
for (Entry<InstanceIdentifier<?>, DataObject> entry : puts) {
- if (entry.getValue() instanceof Meter) {
- Meter Meter = (Meter) entry.getValue();
+ if (entry.getValue() instanceof Meter) {
+ Meter Meter = (Meter) entry.getValue();
preparePutEntry(entry.getKey(), Meter);
}
}
private void preparePutEntry(InstanceIdentifier<?> key, Meter meter) {
-
+
Meter original = originalSwMeterView.get(key);
if (original != null) {
// It is update for us
-
- updates.put(key, meter);
+
+ updates.put(key, meter);
} else {
// It is addition for us
-
+
additions.put(key, meter);
}
}
/**
* We are OK to go with execution of plan
- *
+ *
*/
@Override
public RpcResult<Void> finish() throws IllegalStateException {
-
+
RpcResult<Void> rpcStatus = commitToPlugin(this);
// We return true if internal transaction is successful.
- // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+ // return Rpcs.getRpcResult(true, null, Collections.emptySet());
return rpcStatus;
}
/**
- *
+ *
* We should rollback our preparation
- *
+ *
*/
@Override
public RpcResult<Void> rollback() throws IllegalStateException {
// NOOP - we did not modified any internal state during
// requestCommit phase
- // return Rpcs.getRpcResult(true, null, Collections.emptySet());
+ // return Rpcs.getRpcResult(true, null, Collections.emptySet());
return Rpcs.getRpcResult(true, null, null);
-
+
}
-
+
}
private final class MeterDataCommitHandler implements DataCommitHandler<InstanceIdentifier<?>, DataObject> {
return transaction;
}
}
-
+
final class MeterEventListener implements SalMeterListener {
-
+
List<MeterAdded> addedMeter = new ArrayList<>();
List<MeterRemoved> removeMeter = new ArrayList<>();
List<MeterUpdated> updatedMeter = new ArrayList<>();
@Override
public void onMeterAdded(MeterAdded notification) {
// TODO Auto-generated method stub
-
+
}
@Override
public void onMeterRemoved(MeterRemoved notification) {
// TODO Auto-generated method stub
-
+
}
@Override
public void onMeterUpdated(MeterUpdated notification) {
// TODO Auto-generated method stub
-
- }
+
+ }
}
}