Remove unused code & fix pep8 issues 34/45134/8
author Guo Ruijing <ruijing.guo@intel.com>
Sun, 4 Sep 2016 16:27:13 +0000 (12:27 -0400)
committer Brady Johnson <brady.allen.johnson@ericsson.com>
Tue, 6 Sep 2016 08:44:58 +0000 (10:44 +0200)
Change-Id: I4fb3807ba6edb59d42bc81cc49e818ff8610fdbe
Signed-off-by: Guo Ruijing <ruijing.guo@intel.com>
15 files changed:
sfc-py/sfc/common/services.py
sfc-py/sfc/nsh/decode.py
sfc-py/sfc/nsh/encode.py
sfc-py/sfc/sfc_agent.py
sfc-py/sfc/sff_client.py
sfc-py/unused/README.txt [deleted file]
sfc-py/unused/logging_config.py [deleted file]
sfc-py/unused/nfq_class_server.py [deleted file]
sfc-py/unused/nfq_class_thread.py [deleted file]
sfc-py/unused/odl2ovs.py [deleted file]
sfc-py/unused/odl2ovs_cli.py [deleted file]
sfc-py/unused/pysf_oldnsh.py [deleted file]
sfc-py/unused/service_classifier.py [deleted file]
sfc-py/unused/service_function.py [deleted file]
sfc-py/unused/start_sf.sh [deleted file]

diff --git a/sfc-py/sfc/common/services.py b/sfc-py/sfc/common/services.py
index da3855e6657200d6824430159e7081a894fc0264..fdab2ffe89b8d09e43282439332accb9bfba5897 100644 (file)
@@ -19,7 +19,9 @@ from threading import Thread
 from struct import pack, unpack
 
 from ..common.sfc_globals import sfc_globals
-from ..nsh.common import *  # noqa
+from ..nsh.common import VXLANGPE, BASEHEADER, CONTEXTHEADER, ETHHEADER, TRACEREQHEADER, NSH_NEXT_PROTO_IPV4
+from ..nsh.common import PAYLOAD_START_INDEX_NSH_TYPE1, NSH_NEXT_PROTO_ETH
+from ..nsh.common import PAYLOAD_START_INDEX_NSH_TYPE3, IPV4_HEADER_LEN_BYTES
 from ..nsh import decode as nsh_decode
 from ..nsh.encode import add_sf_to_trace_pkt
 from ..nsh.service_index import process_service_index
diff --git a/sfc-py/sfc/nsh/decode.py b/sfc-py/sfc/nsh/decode.py
index 272769e41b89a24bed85541fe5cefec159489352..419e9d7a32217223479bc40f2c1e069ae653d54a 100644 (file)
@@ -8,8 +8,10 @@
 import struct
 import logging
 import binascii
-from .common import *  # noqa
-
+from .common import NSH_BASE_HEADER_START_OFFSET, NSH_OAM_PKT_START_OFFSET, NSH_OAM_TRACE_HDR_LEN
+from .common import NSH_OAM_TRACE_RESP_SF_TYPE_LEN_START_OFFSET, NSH_OAM_TRACE_RESP_SF_TYPE_START_OFFSET
+from .common import NSH_TYPE1_DATA_PACKET, NSH_TYPE1_OAM_PACKET, OAM_TRACE_REQ_TYPE, OAM_TRACE_RESP_TYPE
+from .common import OAM_VERSION_AND_FLAG, VXLAN_RFC7348_HEADER, VXLAN_START_OFFSET
 
 __author__ = 'Reinaldo Penno'
 __copyright__ = 'Copyright(c) 2014, Cisco Systems, Inc.'
diff --git a/sfc-py/sfc/nsh/encode.py b/sfc-py/sfc/nsh/encode.py
index acc35453b20877fda278e4d62e4519d2d6029ead..b26e36e51e10800190d733248946eaae3c823e5d 100644 (file)
@@ -11,8 +11,8 @@ import socket
 import ipaddress
 import logging
 
-from .common import *  # noqa
-
+from .common import IP4HEADER, IP_HEADER_LEN, IPV4_HEADER_LEN_BYTES, IPV4_PACKET_ID, IPV4_TOS, IPV4_TTL
+from .common import IPV4_VERSION, PSEUDO_UDPHEADER, TRACEREQHEADER, UDPHEADER, UDP_HEADER_LEN_BYTES
 
 __author__ = "Reinaldo Penno, Jim Guichard"
 __copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
diff --git a/sfc-py/sfc/sfc_agent.py b/sfc-py/sfc/sfc_agent.py
index 50cd29a1011d2deeec8784c2abd10554ee686140..c9859db22ba6be320820bd816df85952b2fc5af5 100644 (file)
@@ -7,23 +7,25 @@
 
 import os
 import sys
+import logging
+import json
 import flask
 import signal
 import argparse
+from sfc.common import classifier
+from sfc.common import sfc_globals as _sfc_globals
+from sfc.common.odl_api import auto_sff_name, find_metadata, find_sff_locator, find_sf_locator
+from sfc.common.odl_api import get_metadata_from_odl, get_sffs_from_odl, get_sfp_from_odl, sf_local_host
+from sfc.common.launcher import start_sf, stop_sf, start_sff, stop_sff
+from sfc.cli import xe_cli, xr_cli, ovs_cli
 
 # fix Python 3 relative imports inside packages
 # CREDITS: http://stackoverflow.com/a/6655098/4183498
 parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(1, parent_dir)
-import sfc  # noqa
 
 __package__ = 'sfc'
 
-from sfc.common import classifier
-from sfc.common.odl_api import *  # noqa
-from sfc.cli import xe_cli, xr_cli, ovs_cli
-from sfc.common import sfc_globals as _sfc_globals
-from sfc.common.launcher import start_sf, stop_sf, start_sff, stop_sff
 
 __author__ = "Paul Quinn, Reinaldo Penno"
 __copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
diff --git a/sfc-py/sfc/sff_client.py b/sfc-py/sfc/sff_client.py
index 42a16bcb0dee060b6103eff939de8b26e5436e31..a6acf5148089046aee9bd468fad2301bf00a0ea9 100644 (file)
@@ -8,24 +8,30 @@
 
 import os
 import sys
+import logging
+import binascii
 import platform
 import time
+import socket
+import ipaddress
 import getopt
 import asyncio
 
+from sfc.nsh.common import BASEHEADER, CONTEXTHEADER, ETHERNET_ADDR_SIZE, ETHHEADER, GREHEADER, InnerHeader
+from sfc.nsh.common import NSH_NEXT_PROTO_ETH, NSH_NEXT_PROTO_IPV4, OAM_TRACE_REQ_TYPE, TRACEREQHEADER, VXLAN, VXLANGPE
+
+from sfc.nsh.decode import decode_baseheader, decode_contextheader, decode_trace_resp, decode_vxlan
+
+from sfc.nsh.encode import build_nsh_eth_header, build_nsh_header, build_nsh_trace_header
+from sfc.nsh.encode import build_trace_req_header, build_udp_packet, process_context_headers
 
 # fix Python 3 relative imports inside packages
 # CREDITS: http://stackoverflow.com/a/6655098/4183498
 parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(1, parent_dir)
-import sfc  # noqa
 
 __package__ = 'sfc'
 
-from sfc.nsh.decode import *  # noqa
-from sfc.nsh.encode import *  # noqa
-
-
 __author__ = "Reinaldo Penno, Jim Guichard, Paul Quinn"
 __copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
 __version__ = "0.6"
diff --git a/sfc-py/unused/README.txt b/sfc-py/unused/README.txt
deleted file mode 100644 (file)
index c66df82..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-This folder contains files which are no longer used, but for historical
-reasons are still present.
-No updates are issued for these files anymore.
\ No newline at end of file
diff --git a/sfc-py/unused/logging_config.py b/sfc-py/unused/logging_config.py
deleted file mode 100644 (file)
index 9dcef89..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-import logging
-
-# configure sfc logger as parent logger
-console_handler = logging.StreamHandler()
-console_handler.setLevel(logging.DEBUG)
-console_handler.setFormatter(logging.Formatter('%(levelname)s[%(name)s] %(message)s'))
-
-sfc_logger = logging.getLogger('sfc')
-sfc_logger.setLevel(logging.DEBUG)
-
-sfc_logger.addHandler(console_handler)
diff --git a/sfc-py/unused/nfq_class_server.py b/sfc-py/unused/nfq_class_server.py
deleted file mode 100644 (file)
index 345e876..0000000
+++ /dev/null
@@ -1,469 +0,0 @@
-# flake8: noqa
-# hint: to see the rules execute: 'sudo iptables -S -t raw'
-
-import os
-import sys
-
-# fix PYTHONPATH
-parent_dir = os.path.dirname(os.path.abspath(__file__))
-parent_dir = os.sep.join(parent_dir.split(os.sep)[:-1])
-sys.path.append(parent_dir)
-
-import json
-import socket
-import logging
-import threading
-import subprocess
-
-from netfilterqueue import NetfilterQueue
-
-from nsh.encode import build_packet
-from nsh.common import VXLANGPE, BASEHEADER, CONTEXTHEADER
-
-
-if __name__ == '__main__':
-    from logging_config import *
-
-
-logger = logging.getLogger('sfc.nfq_class_server')
-
-NFQ_NUMBER = 1
-TUNNEL_ID = 0x0500  # TODO: add tunnel_id to sff yang model
-SUDO = True
-
-# global ref to manager
-nfq_class_server_manager = None
-
-
-def execute_cli(cli):
-    """
-    Common BASH command executor
-    """
-    if (SUDO):
-        cli = "sudo " + cli
-    logger.debug("execute_cli: %s", cli)
-    subprocess.call([cli], shell=True)
-    return
-
-
-class NfqTunnelParamsTransformer:
-    """
-    Transforms tunnel params for packet forwarder
-    """
-    def transform_tunnel_params(self, tun_params):
-        if 'context-metadata' in tun_params:
-            ctx_metadata = tun_params['context-metadata']
-            ctx_values = CONTEXTHEADER(ctx_metadata['context-header1'],
-                                       ctx_metadata['context-header2'],
-                                       ctx_metadata['context-header3'],
-                                       ctx_metadata['context-header4'])
-        else:
-            ctx_values = CONTEXTHEADER(0, 0, 0, 0)  # empty default ctx
-
-        # set path_id, starting-index to VXLAN+NSH template
-        vxlan_values = VXLANGPE(int('00000100', 2), 0, 0x894F, TUNNEL_ID, 64)
-        base_values = BASEHEADER(0x1, int('01000000', 2), 0x6, 0x1, 0x1,
-                                 tun_params['nsp'],
-                                 tun_params['starting-index'])
-
-        return {"vxlan_values": vxlan_values,
-                "base_values": base_values,
-                "ctx_values": ctx_values}
-
-
-class NfqClassifierServerManager:
-    def __init__(self, nfq_number):
-        """
-        We use mark that will be equal to path_id
-        """
-        self.nfq_number = nfq_number
-        self.nfqueue = NetfilterQueue()
-        self.tun_params_transformer = NfqTunnelParamsTransformer()
-        self.__reset()
-        #return
-
-    def __del__(self):
-        """
-        Wannabe destructor - does not work
-
-        NetfilterQueue should destroy itself properly automatically
-        """
-        self.__clear_all_rules()
-        #return
-
-    def __reset(self):
-        """
-        Private reset
-        """
-        self.__clear_all_rules()
-        #return
-
-    def __clear_all_rules(self):
-        """
-        Delete all forwarder and iptables rules
-        """
-        logger.info("Clear_all_rules: Reset iptables rules.")
-
-        # init map
-        self.path_id_2_pfw_map = {}
-
-        # clear all previous rules/sub-chains in 'raw' table
-        cli = "iptables -t raw -F"
-        execute_cli(cli)
-
-        cli = "iptables -t raw -X"
-        execute_cli(cli)
-        #return
-
-    # helper
-    def get_sub_chain_name(self, path_id):
-        return "sfp-nfq-" + str(path_id)
-
-    def __common_process_packet(self, packet):
-        """
-        Main NFQ callback for received packets
-        """
-        try:
-            logger.debug("common_process_packet: received packet=%s, mark=%d",
-                         packet, packet.get_mark())
-
-            mark = packet.get_mark()
-
-            # check
-            if mark in self.path_id_2_pfw_map:
-                packet_forwarder = self.path_id_2_pfw_map[mark]
-                packet_forwarder.process_packet(packet)
-            else:
-                logger.warn("common_process_packet: no packet forwarder for "
-                            "mark=%d, dropping the packet", mark)
-                packet.drop()
-
-            #return
-        except Exception as exc:
-            logger.exception('common_process_packet exception: %s', exc)
-
-    def bind_and_run(self):
-        """
-        Bind to queue and run listening loop
-        """
-        self.nfqueue.bind(self.nfq_number, self.__common_process_packet)
-
-        logger.info("NFQ binded to queue number %d", self.nfq_number)
-
-        self.nfqueue.run()
-        #return
-
-    def process_input(self, message_dict):
-        """
-        Apply new configuration
-        """
-        # input
-        path_id = message_dict['path-id']
-        acl = message_dict['acl']
-
-        # check if 'delete' operation
-        if acl == 'delete':
-            self.__destroy_packet_forwarder(path_id)
-            return
-
-        # additional input
-        fw_params = message_dict['forwarding-params']
-        tun_params = message_dict['tunnel-params']
-
-        # delete possible former forwarder
-        if path_id in self.path_id_2_pfw_map:
-            self.__destroy_packet_forwarder(path_id)
-        # init new forwarder
-        self.__init_new_packet_forwarder(path_id,
-                                         fw_params,
-        self.tun_params_transformer.transform_tunnel_params(tun_params))
-        # create rules
-        self.__compile_acl(acl, path_id)
-        return
-
-    def __compile_acl(self, acl_item, path_id):
-        logger.debug("__compile_acl: acl_item=%s", acl_item)
-        for ace in acl_item['access-list-entries']:
-            self.__add_iptables_classification_rule(ace, path_id)
-        return
-
-    def __init_new_packet_forwarder(self, path_id, forwarding_params,
-                                    tunnel_params):
-        sub_chain_name = self.get_sub_chain_name(path_id)
-
-        # create sub-chain for the path, this way we can in future easily
-        # remove the rules for particular path
-        cli = "iptables -t raw -N " + sub_chain_name
-        execute_cli(cli)
-
-        # insert jump to sub-chain
-        cli = "iptables -t raw -I PREROUTING -j " + sub_chain_name
-        execute_cli(cli)
-
-        # add jump to queue 'nfq_number' in case of match mark (ACL matching
-        # rules will have to be inserted before this one)
-        cli = ("iptables -t raw -A " + sub_chain_name +
-               " -m mark --mark " + str(path_id) +
-               " -j NFQUEUE --queue-num " + str(self.nfq_number))
-        execute_cli(cli)
-
-        packet_forwarder = PacketForwarder(path_id)
-
-        packet_forwarder.update_forwarding_params(forwarding_params)
-        packet_forwarder.update_tunnel_params(tunnel_params)
-
-        self.path_id_2_pfw_map[path_id] = packet_forwarder
-        return
-
-    def __destroy_packet_forwarder(self, path_id):
-        """
-        Destroy PacketForwader with iptables rules and chains
-        """
-        # check
-        assert path_id
-
-        if path_id in self.path_id_2_pfw_map:
-            logger.debug("destroy_packet_forwarder: Removing classifier for "
-                         "path_id=%d", path_id)
-
-            del self.path_id_2_pfw_map[path_id]
-
-            sub_chain_name = self.get_sub_chain_name(path_id)
-
-            # -D - delete the jump to sub-chain
-            cli = "iptables -t raw -D PREROUTING -j " + sub_chain_name
-            execute_cli(cli)
-
-            # delete rules in sub-chain
-            cli = "iptables -t raw -F  " + sub_chain_name
-            execute_cli(cli)
-
-            # delete sub-chain
-            cli = "iptables -t raw -X " + sub_chain_name
-            execute_cli(cli)
-
-            logger.info("destroy_packet_forwarder: Classifier for path_id=%d "
-                        "removed", path_id)
-        else:
-            logger.debug("destroy_packet_forwarder: Classifier for path_id=%d "
-                         "not found", path_id)
-
-    def __add_iptables_classification_rule(self, ace, path_id):
-        """
-        Create iptables matches for sending packets to NFQ of given number
-        """
-        assert ace
-        assert path_id
-
-        ace_matches = ace['matches']
-
-        # dl_src
-        dl_src = ''
-        if 'source-mac-address' in ace_matches:
-            dl_src = '-m mac --mac-source' + ace_matches['source-mac-address']
-
-            if 'source-mac-address-mask' in ace_matches:
-                logger.warn('source-mac-address-mask match not implemented')
-
-        # dl_dst
-        dl_dst = ''
-        if 'destination-mac-address' in ace_matches:
-            logger.warn('destination-mac-address match not implemented')
-
-        # nw_src/ipv6_src
-        nw_src = ''
-        if 'source-ipv4-address' in ace_matches:
-            nw_src = ' -s ' + ace_matches['source-ipv4-address']
-
-        ipv6_src = ''
-        if 'source-ipv6-address' in ace_matches:
-            # not sure about this
-            ipv6_src = ' -s ' + ace_matches['source-ipv6-address']
-
-        #nw_dst/ipv6_dst
-        nw_dst = ''
-        if 'destination-ipv4-address' in ace_matches:
-            nw_dst = ' -d ' + ace_matches['destination-ipv4-address']
-
-        ipv6_dst = ''
-        if 'destination-ipv6-address' in ace_matches:
-            # not sure about this
-            ipv6_dst = ' -d ' + ace_matches['destination-ipv6-address']
-
-        # nw_proto --- TCP/UDP ....
-        nw_proto = ''
-        if 'ip-protocol' in ace_matches:
-            if ace_matches['ip-protocol'] == 7:
-                nw_proto = ' -p tcp'
-            elif ace_matches['ip-protocol'] == 17:
-                nw_proto = ' -p udp'
-            else:
-                logger.warn('unknown ip-protocol=%d',
-                            ace_matches['ip-protocol'])
-
-        # only lower transport port dst/src supported !!!!
-        tp_dst = ''
-        if 'destination-port-range' in ace_matches:
-            if 'lower-port' in ace_matches['destination-port-range']:
-                if nw_proto == '':
-                    logger.error("add_iptables_classification_rule: "
-                                 "processing 'destination-port-range'. "
-                                 "ip-protocol must be specified")
-                    return
-
-                port = str(ace_matches['destination-port-range']['lower-port'])
-                tp_dst = ' --dport ' + port
-
-        tp_src = ''
-        if 'source-port-range' in ace_matches:
-            if 'lower-port' in ace_matches['source-port-range']:
-                if nw_proto == '':
-                    logger.error("add_iptables_classification_rule: "
-                                 "processing 'source-port-range'. "
-                                 "ip-protocol must be specified")
-                    return
-
-                port = str(ace_matches['source-port-range']['lower-port'])
-                tp_src = ' --sport ' + port
-
-        sub_chain_name = self.get_sub_chain_name(path_id)
-
-        # 'I' - insert this 'set mark' rule before the 'jump to queue' rule
-        cli = "iptables -t raw -I " + sub_chain_name
-        cli += nw_src + nw_dst + ipv6_src + ipv6_dst
-        cli += nw_proto + tp_src + tp_dst
-        cli += " -j MARK --set-mark " + str(path_id)
-
-        execute_cli(cli)
-        #return
-
-
-class PacketForwarder:
-    def __init__(self, path_id):
-        self.path_id = path_id
-        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        self._fw_params_set = False
-        self._tun_params_set = False
-        return
-
-    def __str__(self):
-        return "PacketForwarder: path_id=" + str(self.path_id)
-
-    def update_forwarding_params(self, forwarding_params):
-        if not forwarding_params:
-            return
-        self.sff_ip_addr = forwarding_params['ip']
-        self.sff_port = forwarding_params['port']
-        self._fw_params_set = True
-        return
-
-    def update_tunnel_params(self, tunnel_params):
-        if not tunnel_params:
-            return
-        self.vxlan_values = tunnel_params['vxlan_values']
-        self.base_values = tunnel_params['base_values']
-        self.ctx_values = tunnel_params['ctx_values']
-        self._tun_params_set = True
-        return
-
-    def process_packet(self, orig_packet):
-        # check
-        if not self._fw_params_set:
-            logger.error('process_packet: Forwarding params not set for '
-                         'path_id=%d', self.path_id)
-            return
-
-        if not self._tun_params_set:
-            logger.error('process_packet: Tunnel params not set for '
-                         'path_id=%d', self.path_id)
-            return
-
-        logger.debug('process_packet: Forwarding packet to %s:%d',
-                     self.sff_ip_addr, self.sff_port)
-
-        orig_payload = orig_packet.get_payload()
-        vxlan_packet = build_packet(self.vxlan_values,
-                                    self.base_values,
-                                    self.ctx_values) + orig_payload
-
-        self.socket.sendto(vxlan_packet, (self.sff_ip_addr, self.sff_port))
-        # ! drop original packet
-        orig_packet.drop()
-        return
-
-
-# global procedures
-def start_nfq_class_server_manager():
-    global nfq_class_server_manager
-
-    if nfq_class_server_manager:
-        logger.error('Nfq classifier already started!')
-        return
-
-    nfq_class_server_manager = NfqClassifierServerManager(NFQ_NUMBER)
-    nfq_class_server_manager.bind_and_run()
-    return
-
-
-# starts nfq thread and listens on socket
-def nfq_class_server_start():
-    global nfq_class_server_manager
-
-    logger.info('starting thread for NetfilterQueue.run()')
-    t = threading.Thread(target=start_nfq_class_server_manager, args=())
-    t.daemon = True
-    t.start()
-
-    # create and listen on stream socket
-    logger.info('creating socket')
-
-    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-    try:
-        os.remove("/tmp/nfq-class.sock")
-    except OSError:
-        pass
-
-    s.bind("/tmp/nfq-class.sock")
-    # allow not root access
-    execute_cli('chmod 777 /tmp/nfq-class.sock')
-
-    logger.info('listening on socket')
-    while True:
-        s.listen(1)
-        conn, _ = s.accept()
-
-        message = ""
-        message_dict = None
-        try:
-            # collect message
-            while True:
-                # buffer
-                data = conn.recv(1024)
-                if not data:
-                    # end of stream
-                    break
-
-                message = message + data.decode()
-
-            # convert received message
-            logger.debug('socket received message: %s', message)
-            message_dict = json.loads(message)
-        except:
-            logger.exception("exception while receiving data, message %s not "
-                             "applied", message)
-            break
-
-        try:
-            # apply message
-            nfq_class_server_manager.process_input(message_dict)
-        except:
-            logger.exception("exception while applying message %s", message)
-            break
-
-    conn.close()
-    #return
-
-
-# launch main loop
-if __name__ == '__main__':
-    nfq_class_server_start()
diff --git a/sfc-py/unused/nfq_class_thread.py b/sfc-py/unused/nfq_class_thread.py
deleted file mode 100644 (file)
index 4ee7b71..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-# hint: to see the rules execute: 'sudo iptables -S -t raw'
-
-
-import os
-import json
-import socket
-import logging
-import threading
-import subprocess
-
-from sfc_agent import find_sff_locator
-from common.sfc_globals import sfc_globals
-from classifier.nfq_class_server import nfq_class_server_start
-
-
-logger = logging.getLogger('sfc.nfq_class_thread')
-
-nfq_class_manager = None
-
-
-def get_nfq_class_manager_ref():
-    global nfq_class_manager
-    return nfq_class_manager
-
-
-# TODO: implement class like this like global helper class managing path infos
-class NfqPathInfoSupplier:
-    def __init__(self):
-        self.path_name_2_id_map = {}
-        self.path_id_2_info_map = {}
-
-    def get_path_id(self, path_name):
-        if path_name in self.path_name_2_id_map:
-            return self.path_name_2_id_map[path_name]
-
-        if self.__add_path_info(path_name):
-            # call this one once more
-            return self.get_path_id(path_name)
-        else:
-            logger.warn('get_path_id: path not found (path_name=%s)',
-                        path_name)
-            return None
-
-    def delete_path_info(self, path_id):
-        # remove data from maps for given path
-        if path_id in self.path_id_2_info_map:
-            path_item = self.path_id_2_info_map.pop(path_id)
-            path_name = path_item['name']
-
-            if path_name in self.path_name_2_id_map:
-                self.path_name_2_id_map.pop(path_name)
-                return True
-        else:
-            logger.debug('delete_path_info: path not found (path_id=%d)',
-                         path_id)
-
-        return False
-
-    def __add_path_info(self, path_name):
-        """
-        Returns True if path_item was found in global path data
-        """
-        if not sfc_globals.get_path():
-            logger.warn('__add_path_info: No path data')
-            return False
-
-        if path_name in sfc_globals.get_path():
-            path_item = sfc_globals.get_path()[path_name]
-            path_id = path_item['path-id']
-            self.path_name_2_id_map[path_name] = path_id
-            self.path_id_2_info_map[path_id] = path_item
-            return True
-
-        return False
-
-    def get_forwarding_params(self, path_id):
-        # assuming info already added by requesting path_id before
-        if path_id not in self.path_id_2_info_map:
-            logger.warn('get_forwarding_params: path data not found for '
-                        'path_id=%d', path_id)
-            return None
-
-        path_item = self.path_id_2_info_map[path_id]
-
-        # string ref for sff for first hop
-        sff_name = (path_item['rendered-service-path-hop']
-                             [0]
-                             ['service-function-forwarder'])
-
-        sff_locator = find_sff_locator(sff_name)
-        if not sff_locator:
-            logger.warn('get_forwarding_params: sff data not found for '
-                        'sff_name=%s', sff_name)
-
-        return sff_locator
-
-    def get_tunnel_params(self, path_id):
-        # assuming info already added by requesting path_id before
-        if path_id not in self.path_id_2_info_map:
-            logger.warn('get_tunnel_params: path data not found for '
-                        'path_id=%d', path_id)
-            return None
-
-        path_item = self.path_id_2_info_map[path_id]
-
-        result = {}
-        result['nsp'] = path_id
-        result['starting-index'] = path_item['starting-index']
-
-        if 'context-metadata' in path_item:
-            result['context-metadata'] = path_item['context-metadata']
-
-        return result
-
-
-class NfqClassifierManager:
-    # we use packet-mark that will be equal to path_id
-    def __init__(self):
-        self.path_info_supp = NfqPathInfoSupplier()
-        return
-
-    # compile_one_acl
-    # !assumed! all aces in alc_item are for one and only path
-    def compile_one_acl(self, acl_item):
-        logger.debug("compile_one_acl: acl_item=%s", acl_item)
-
-        # add error info to this dictionary
-        collected_results = {}
-
-        first_ace = acl_item['access-list-entries'][0]
-        path_name = (first_ace['actions']
-                              ['service-function-acl:rendered-service-path'])
-
-        path_id = self.path_info_supp.get_path_id(path_name)
-        if not path_id:
-            logger.error("compile_one_acl: path_id not found for path_name=%s",
-                         path_name)
-
-            collected_results[path_name] = 'Path data not found'
-            return collected_results
-
-        logger.debug("compile_one_acl: found path_id=%d", path_id)
-
-        # ip, port
-        fw_params = self.path_info_supp.get_forwarding_params(path_id)
-        # nsp, starting-index, context-metadata
-        tn_params = self.path_info_supp.get_tunnel_params(path_id)
-
-        data = {
-            'path-id': path_id,
-            'acl': acl_item,                 # only entries for this path
-            'forwarding-params': fw_params,
-            'tunnel-params': tn_params
-        }
-
-        jsonified_data = json.dumps(data)
-
-        try:
-            socka = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            socka.connect("/tmp/nfq-class.sock")
-            logger.debug("compile_one_acl: sending to socket: %s",
-                         jsonified_data)
-            socka.send(jsonified_data.encode())
-            socka.close()
-        except:
-            msg = 'Error sending data to nfq classifier server'
-            collected_results[path_name] = msg
-            logger.exception('data was not sent: exception:')
-
-        # return info about unsuccess
-        return collected_results
-
-
-def __start_nfq_classifier_separate_script():
-    parent_dir = os.path.dirname(os.path.abspath(__file__))
-    nfq_class_server_path = os.path.join(parent_dir, 'nfq_class_server.py')
-
-    cli = "sudo python3.4 %s" % nfq_class_server_path
-
-    logger.info("start_nfq_classifier_separate_script cli: %s", cli)
-    subprocess.call([cli], shell=True)
-
-    return
-
-
-# globals
-def start_nfq_classifier(start_server_as_separate_script):
-    logger.info('starting nfq classifier server')
-
-    thread = None
-    if not start_server_as_separate_script:
-        thread = threading.Thread(target=nfq_class_server_start, args=())
-    else:
-        thread = threading.Thread(
-            target=__start_nfq_classifier_separate_script, args=())
-
-    thread.daemon = True
-    thread.start()
-
-    global nfq_class_manager
-
-    if nfq_class_manager:
-        logger.error('Nfq classifier already started!')
-        return
-
-    nfq_class_manager = NfqClassifierManager()
-    return
diff --git a/sfc-py/unused/odl2ovs.py b/sfc-py/unused/odl2ovs.py
deleted file mode 100644 (file)
index 3c9cdc5..0000000
+++ /dev/null
@@ -1,689 +0,0 @@
-#
-# Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
-#
-# This program and the accompanying materials are made available under the
-# terms of the Eclipse Public License v1.0 which accompanies this distribution,
-# and is available at http://www.eclipse.org/legal/epl-v10.html
-
-import logging
-import socket
-
-from flask import *  # noqa
-from random import randint
-import sys
-import getopt
-import json
-import requests
-from odl2ovs_cli import *  # noqa
-
-__author__ = "Paul Quinn, Reinaldo Penno"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__version__ = "0.2"
-__email__ = "paulq@cisco.com, rapenno@gmail.com"
-__status__ = "alpha"
-
-""" SFF REST Server. This Server should be co-located with a OVS switch """
-
-app = Flask(__name__)
-my_topo = {}
-
-sff_topo = {}
-path = {}
-
-# ODL IP:port
-ODLIP = "127.0.0.1:8181"
-# Static URLs for testing
-SF_URL = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
-SFC_URL = "http://" + ODLIP + "/restconf/config/service-function-chain:service-function-chains/"
-SFF_URL = "http://" + ODLIP + "/restconf/config/service-function-forwarder:service-function-forwarders/"
-SFT_URL = "http://" + ODLIP + "/restconf/config/service-function-type:service-function-types/"
-SFP_URL = "http://" + ODLIP + "/restconf/config/service-function-path:service-function-paths/"
-
-SFF_PARAMETER_URL = "http://{}/restconf/config/service-function-forwarder:service-function-forwarders/"
-
-SFF_NAME_PARAMETER_URL = "http://{}/restconf/config/service-function-forwarder:service-function-forwarders/service-function-forwarder/{}"  # noqa
-
-USERNAME = "admin"
-PASSWORD = "admin"
-
-logger = logging.getLogger(__name__)
-
-
-def sffinit():
-    """
-    This function is used when testing without actual OVS switch
-    :return:
-    """
-
-    sff_topo_init = {
-        "service-function-forwarders": {
-            "service-function-forwarder": [
-                {
-                    "name": "SFF1",
-                    "service-node": "OVSDB1",
-                    "sff-data-plane-locator": [
-                        {
-                            "name": "eth0",
-                            "service-function-forwarder-ovs:ovs-bridge": {
-                                "bridge-name": "br-tun",
-                                "uuid": "4c3778e4-840d-47f4-b45e-0988e514d26c"
-                            },
-                            "data-plane-locator": {
-                                "port": 4789,
-                                "ip": "10.100.100.1",
-                                "transport": "service-locator:vxlan-gpe"
-                            }
-                        }
-                    ],
-                    "rest-uri": "http://198.18.134.23",
-                    "service-function-dictionary": [
-                        {
-                            "name": "SF1",
-                            "type": "dp1",
-                            "sff-sf-data-plane-locator": {
-                                "port": 4789,
-                                "ip": "10.1.1.4",
-                                "transport": "service-locator:vxlan-gpe",
-                                "service-function-forwarder-ovs:ovs-bridge": {
-                                    "bridge-name": "br-int"
-                                }
-                            }
-                        },
-                        {
-                            "name": "SF2",
-                            "type": "napt44",
-                            "sff-sf-data-plane-locator": {
-                                "port": 4789,
-                                "ip": "10.1.1.5",
-                                "transport": "service-locator:vxlan-gpe",
-                                "service-function-forwarder-ovs:ovs-bridge": {
-                                    "bridge-name": "br-int"
-                                }
-                            }
-                        }
-                    ],
-                    "classifier": "acl-sfp-1",
-                    "ip-mgmt-address": "198.18.134.23"
-                },
-                {
-                    "name": "SFF2",
-                    "service-node": "OVSDB2",
-                    "sff-data-plane-locator": [
-                        {
-                            "name": "eth0",
-                            "service-function-forwarder-ovs:ovs-bridge": {
-                                "bridge-name": "br-tun",
-                                "uuid": "fd4d849f-5140-48cd-bc60-6ad1f5fc0a0"
-                            },
-                            "data-plane-locator": {
-                                "port": 4789,
-                                "ip": "10.100.100.2",
-                                "transport": "service-locator:vxlan-gpe"
-                            }
-                        }
-                    ],
-                    "rest-uri": "http://198.18.134.23",
-                    "service-function-dictionary": [
-                        {
-                            "name": "SF3",
-                            "type": "firewall",
-                            "sff-sf-data-plane-locator": {
-                                "port": 4789,
-                                "ip": "10.1.2.6",
-                                "transport": "service-locator:vxlan-gpe",
-                                "service-function-forwarder-ovs:ovs-bridge": {
-                                    "bridge-name": "br-int"
-                                }
-                            }
-                        }
-                    ],
-                    "ip-mgmt-address": "198.18.134.24"
-                }
-            ]
-        }
-    }
-
-    return sff_topo_init
-
-
-def pathinit():
-    """
-    This function is used when testing without actual OVS switch
-    :return:
-    """
-    path_init = {
-        "service-function-paths": {
-            "service-function-path": [
-                {
-                    "name": "Path-1-SFC1",
-                    "path-id": 1,
-                    "starting-index": 3,
-                    "service-chain-name": "SFC1",
-                    "service-path-hop": [
-                        {
-                            "hop-number": 0,
-                            "service-function-name": "SF1",
-                            "service_index": 3,
-                            "service-function-forwarder": "SFF1"
-                        },
-                        {
-                            "hop-number": 1,
-                            "service-function-name": "SF2",
-                            "service_index": 2,
-                            "service-function-forwarder": "SFF1"
-                        },
-                        {
-                            "hop-number": 2,
-                            "service-function-name": "SF3",
-                            "service_index": 1,
-                            "service-function-forwarder": "SFF2"
-                        }
-                    ]
-                }
-            ]
-        }
-    }
-
-    return path_init
-
-
-# the following dictionaries are for testing only.  Remove when running on OVS.
-def get_bridge_info():
-    b1 = {
-        'status': '{}',
-        'fail_mode': '[]',
-        'datapath_id': '"0000e21a84dd0c4c"',
-        'datapath_type': '""',
-        'sflow': '[]',
-        'mirrors': '[]',
-        'ipfix': '[]',
-        '_uuid': 'dd841ae1-0a6e-4c0c-b24c-059e7b0b87f8',
-        'other_config': '{}',
-        'flood_vlans': '[]',
-        'stp_enable': 'false',
-        'controller': '[]',
-        'mcast_snooping_enable': 'false',
-        'flow_tables': '{}',
-        'ports': '[60ce3635-70d2-4c48-98f6-cefd65ab0e58]',
-        'external_ids': '{bridge-id="SFF1"}',
-        'netflow': '[]',
-        'protocols': '[]',
-        'name': '"br-int"'
-    }
-
-    b2 = {
-        'status': '{}',
-        'fail_mode': '[]',
-        'datapath_id': '"000052f810c06148"',
-        'datapath_type': '""',
-        'sflow': '[]',
-        'mirrors': '[]',
-        'ipfix': '[]',
-        '_uuid': 'c010f853-5c8a-4861-9e53-050981fbc121',
-        'other_config': '{}',
-        'flood_vlans': '[]',
-        'stp_enable': 'false',
-        'controller': '[]',
-        'mcast_snooping_enable': 'false',
-        'flow_tables': '{}',
-        'ports': '[4a194fdd-ed59-47cf-998b-7c996c46e3e6]',
-        'external_ids': '{}',
-        'netflow': '[]',
-        'protocols': '[]',
-        'name': '"br-tun"'
-    }
-
-    # br_list = []
-    br_dict_list = []
-    # bc = 0
-    # br_dict={}
-
-    # bridges = subprocess.check_output(['ovs-vsctl', 'list-br'])
-
-    # for line in bridges.split('\n'):
-    # br_list.append(line)
-
-    # while bc < (len(br_list) - 1):
-    # b = subprocess.check_output(['ovs-vsctl', 'list', 'bridge', br_list[b]])
-    # for row in b.split('\n'):
-    # if ': ' in row:
-    # key, value = row.split(': ')
-    # br_dict[key.strip()] = value.strip()
-    #  br_dict_list.append(br_dict)
-    #  b = b+1
-
-    # test code
-
-    br_dict_list.append(b1)
-    br_dict_list.append(b2)
-    return br_dict_list
-
-
-# This function does not work if machine has more than one IP/interface
-def get_my_ip():
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    s.connect(('8.8.8.8', 80))
-    myip = (s.getsockname()[0])
-    s.close()
-    myip = "http://" + myip + ":/paths"
-    return myip
-
-
-def parse_bridges(bridge_list):
-    # num_of_bridges = len(bridge_list)
-    all_bridges = []
-    br_dict = {}
-
-    for bridge in bridge_list:
-        if bridge['name'] == '"br-tun"' or '"br-int"':
-            br_dict = {
-                'name': bridge['name'][1:-1],
-                'external_ids': bridge['external_ids'], 'uuid': bridge['_uuid']
-            }
-        all_bridges.append(br_dict)
-    return all_bridges
-
-
-# Not used anymore
-def who_am_i(path, bridges):
-    for path in path['service-function-paths']['service-function-path']:
-        for sff in path['service-path-hop']:
-            for bridge in bridges:
-                if sff['service-function-forwarder'] == bridge['external_ids'][12:-2]:
-                    return sff['service-function-forwarder']
-
-
-def who_am_i_sfp(service_path):
-    """
-    Determines the name of the local attached SFF
-    :param service_path: A single Service Function Path
-    :return: The name of the local attached SFF
-    """
-    ovsbridges = get_bridge_info()
-    bridges = parse_bridges(ovsbridges)
-    for sff in service_path['service-path-hop']:
-        for bridge in bridges:
-            if sff['service-function-forwarder'] == bridge['external_ids'][12:-2]:
-                return sff['service-function-forwarder']
-    return None
-
-
-def who_am_i_sff():
-    """
-    Determines the name of the local attached SFF by checking
-    against the collections of all known SFFs
-    :return: The name of the local attached SFF
-    """
-    ovsbridges = get_bridge_info()
-    bridges = parse_bridges(ovsbridges)
-    for bridge in bridges:
-        if bridge['external_ids'][12:-2] in sff_topo.keys():
-            return bridge['external_ids'][12:-2]
-    return None
-
-
-# Not used anymore
-def build_a_path(path, my_sff):
-    # me = 'SFF-bootstrap'
-    sflist = []
-    nextsff = {}
-    sfdict = {}
-    count = 0
-    pid = 0
-
-    for path in path['service-function-paths']['service-function-path']:
-        pid = path['path-id']
-        for sf in path['service-path-hop']:
-            if sf['service-function-forwarder'] == my_sff:
-                sfdict['sff'] = sf['service-function-forwarder']
-                sfdict['pid'] = path['path-id']
-                sfdict['name'] = sf['service-function-name']
-                sfdict['index'] = sf['service_index']
-                find_sf_loc(sfdict)
-                sflist.append(sfdict)
-                sfdict = {}
-                count += 1
-    nextsff['sff-name'] = path['service-path-hop'][count]['service-function-forwarder']
-    nextsff['sff-index'] = path['service-path-hop'][count]['service_index']
-    nextsffloc = find_sff_loc(nextsff)
-    return sflist, nextsffloc, nextsff, my_sff, pid
-
-
-def build_service_path(service_path, my_sff_name):
-    """
-    Builds a dictionary of the local attached Service Functions
-    :param path: A single Service Function Path
-    :param my_sff_name: The name of the local attached SFF
-    :return:
-    """
-    # my_sff = 'SFF-bootstrap'
-    sflist = []
-    nextsff = {}
-    sfdict = {}
-    count = 0
-
-    for service_hop in service_path['service-path-hop']:
-        if service_hop['service-function-forwarder'] == my_sff_name:
-            sfdict['sff'] = service_hop['service-function-forwarder']
-            sfdict['pid'] = service_path['path-id']
-            sfdict['name'] = service_hop['service-function-name']
-            sfdict['index'] = service_hop['service_index']
-            sfdict['locator'] = find_sf_locator(sfdict['name'], sfdict['sff'])
-            if sfdict['locator'] is None:
-                logger.error("Could not find data plane locator for SF: %s", sfdict['name'])
-            sflist.append(sfdict)
-            sfdict = {}
-            count += 1
-    nextsff['sff-name'] = service_path['service-path-hop'][count]['service-function-forwarder']
-    nextsff['sff-index'] = service_path['service-path-hop'][count]['service_index']
-    nextsffloc = find_sff_locator(nextsff['sff-name'])
-    if nextsffloc is None:
-        logger.error("Could not find data plane locator for SFF: %s", nextsff['sff-name'])
-    return sflist, nextsffloc, nextsff
-
-
-def find_sf_locator(sf_name, sff_name):
-    """
-    Looks for the SF name  within the service function
-    dictionary of sff_name. If found, return the
-    corresponding data plane locator
-
-    :param sfdict: A dictionary with a single SF attributes
-    :return: SF data plane locator
-    """
-    service_dictionary = sff_topo[sff_name]['service-function-dictionary']
-    for service_function in service_dictionary:
-        if sf_name == service_function['name']:
-            return service_function['sff-sf-data-plane-locator']['ip']
-    return None
-
-
-def find_sff_locator(sff_name):
-    """
-    For a given SFF name, look into local SFF topology for a match
-    and returns the corresponding data plane locator
-    :param sff_name:
-    :return: SFF data plane locator
-    """
-    try:
-        return sff_topo[sff_name]['sff-data-plane-locator'][0]['data-plane-locator']['ip']
-    except KeyError:
-        msg = "SFF {} locator not found".format(sff_name)
-        logger.warning(msg)
-        return None
-    except:
-        logger.warning("Unexpected exception, re-raising it")
-        raise
-
-
-# Not used anymore
-def find_sff_loc(sff):
-    for sffi in sff_topo['service-function-forwarders']['service-function-forwarder']:
-        if sffi['name'] == sff['sff-name']:
-            return sffi['sff-data-plane-locator'][0]['data-plane-locator']['ip']
-
-
-# Not used anymore
-def find_sf_loc(sfdict):
-    count = 0
-    while count < len(sff_topo['service-function-forwarders']['service-function-forwarder']):
-        if sff_topo['service-function-forwarders']['service-function-forwarder'][count]['name'] == sfdict['sff']:
-            for sfi in (sff_topo['service-function-forwarders']
-                        ['service-function-forwarder']
-                        [count]['service-function-dictionary']):
-                if sfdict['name'] == sfi['name']:
-                    sfdict['locator'] = sfi['sff-sf-data-plane-locator']['ip']
-        count += 1
-        return
-
-
-def mytopo(nextsffloc, vxlanid):
-    global my_topo
-    if nextsffloc in my_topo.keys():
-        return my_topo[nextsffloc]
-    else:
-        vxlan = 'vxlan' + str(vxlanid)
-        my_topo[nextsffloc] = vxlan
-        vxlanid += 1
-        return vxlanid
-
-
-def cli():
-    global path
-    global sff_topo
-    path = pathinit()
-    sff_topo = sffinit()
-    ovsbridges = get_bridge_info()
-    bridge_info = parse_bridges(ovsbridges)
-    my_sff = who_am_i(path, bridge_info)
-    vxlanid = 0
-    key = hex(randint(1, 16777216))
-    build_a_path(path, my_sff)
-    mysflist, nextsffloc, nextsff, my_sff, pid = build_a_path(path, my_sff)
-    vxlanid = mytopo(nextsffloc, vxlanid)
-    vxlanid = cli_local(mysflist, vxlanid, key)
-    cli_nextsff(nextsffloc, nextsff, key, vxlanid, pid)
-    return
-
-
-# Not used anymore
-def ovsbuildit(path):
-    print "BUILDING CHAIN..."
-    ovsbridges = get_bridge_info()
-    bridge_info = parse_bridges(ovsbridges)
-    my_sff = who_am_i(path, bridge_info)
-    # my_topo = {}
-    vxlanid = 0
-    key = hex(randint(1, 16777216))
-
-    mysflist, nextsffloc, nextsff, me, pid = build_a_path(path, my_sff)
-    my_topo, vxlanid = mytopo(nextsffloc, vxlanid)
-    vxlanid = ovs_cli_local(mysflist, vxlanid, key)
-    ovs_cli_nextsff(nextsffloc, nextsff, key, vxlanid, pid)
-    return
-
-
-def ovsbuild_one_path(service_path):
-    """
-    :param path: A single Service Function Path
-    :return: Nothing
-    """
-    logger.info("BUILDING CHAIN...")
-    my_sff_name = who_am_i_sfp(service_path)
-    if my_sff_name is None:
-        logger.info("Service path does not contain local SFF")
-        return
-    # Is this correct?
-    vxlanid = 0
-    key = hex(randint(1, 16777216))
-
-    mysflist, nextsffloc, nextsff = build_service_path(service_path, my_sff_name)
-    vxlanid = mytopo(nextsffloc, vxlanid)
-    vxlanid = ovs_cli_local(mysflist, vxlanid, key)
-    pid = mysflist[0]['pid']
-    ovs_cli_nextsff(nextsffloc, nextsff, key, vxlanid, pid)
-    return
-
-
-@app.route('/config/service-function-path:service-function-paths/', methods=['GET'])
-def get_paths():
-    return jsonify({'Service paths': path})
-
-
-@app.route('/config/service-function-forwarder:service-function-forwarders/', methods=['GET'])
-def get_sffs():
-    return jsonify({'SFFs': sff_topo})
-
-
-@app.route('/config/service-function-path:service-function-paths/', methods=['PUT'])
-def create_paths():
-    global path
-    if not request.json:
-        abort(400)
-    else:
-        path = {
-            'service-function-paths': request.json['service-function-paths']
-        }
-    if any(sff_topo):
-        ovsbuildit(path)
-    return jsonify({'path': path}), 201
-
-
-@app.route('/config/service-function-path:service-function-paths/service-function-path/<sfpname>', methods=['PUT'])
-def create_path(sfpname):
-    global path
-    if not request.json:
-        abort(400)
-    else:
-        # print json.dumps(sfpjson)
-        # sfpj_name = sfpjson["service-function-path"][0]['name']
-        path[sfpname] = request.get_json()["service-function-path"][0]
-
-    if any(sff_topo):
-        ovsbuild_one_path(path[sfpname])
-    return jsonify({'path': path}), 201
-
-
-@app.route('/config/service-function-path:service-function-paths/service-function-path/<sfpname>', methods=['DELETE'])
-def delete_path(sfpname):
-    global path
-    try:
-        del path[sfpname]
-    except KeyError:
-        msg = "SFP name {} not found, message".format(sfpname)
-        logger.warning(msg)
-        return msg, 404
-    except:
-        logger.warning("Unexpected exception, re-raising it")
-        raise
-    return '', 204
-
-
-@app.route('/config/service-function-forwarder:service-function-forwarders/service-function-forwarder/<sffname>',
-           methods=['PUT'])
-def create_sff(sffname):
-    global sff_topo
-    if not request.json:
-        abort(400)
-    else:
-        sff_topo[sffname] = request.get_json()['service-function-forwarder'][0]
-    if any(path):
-        ovsbuild_one_path(path)
-    return jsonify({'sff': sff_topo}), 201
-
-
-@app.route('/config/service-function-forwarder:service-function-forwarders/service-function-forwarder/<sffname>',
-           methods=['DELETE'])
-def delete_sff(sffname):
-    global sff_topo
-    try:
-        del sff_topo[sffname]
-    except KeyError:
-        msg = "SFF name {} not found, message".format(sffname)
-        logger.warning(msg)
-        return msg, 404
-    except:
-        logger.warning("Unexpected exception, re-raising it")
-        raise
-    return '', 204
-
-
-@app.route('/config/service-function-forwarder:service-function-forwarders/', methods=['PUT'])
-def create_sffs():
-    global sff_topo
-    if not request.json:
-        abort(400)
-    else:
-        sff_topo = {
-            'service-function-forwarders': request.json['service-function-forwarders']
-        }
-    if any(path):
-        ovsbuildit(path)
-    return jsonify({'sff': sff_topo}), 201
-
-
-@app.route('/config/service-function-forwarder:service-function-forwarders/', methods=['DELETE'])
-def delete_sffs():
-    global sff_topo
-    sff_topo = {}
-    return jsonify({'sff': sff_topo}), 201
-
-
-@app.errorhandler(404)
-def page_not_found(e):
-    return render_template('404.html'), 404
-
-
-def get_sffs_from_odl(odl_ip_port):
-    """
-    Retrieves the list fo configured SFFs from ODL
-    :return: Nothing
-    """
-    global sff_topo
-    s = requests.Session()
-    print ("Getting SFF information from ODL... \n")
-    r = s.get(SFF_PARAMETER_URL.format(odl_ip_port), stream=False, auth=(USERNAME, PASSWORD))
-    if r.status_code == 200:
-        sff_json = json.loads(r.text)['service-function-forwarders']['service-function-forwarder']
-        for sff in sff_json:
-            sff_topo[sff['name']] = sff
-    else:
-        print ("=>Failed to GET SFF from ODL \n")
-
-
-def get_sff_from_odl(odl_ip_port, sff_name):
-    """
-    Retrieves the list fo configured SFFs from ODL
-    :return: Nothing
-    """
-    global sff_topo
-    s = requests.Session()
-    print ("Getting SFF information from ODL... \n")
-    r = s.get(SFF_PARAMETER_URL.format(odl_ip_port, sff_name), stream=False, auth=(USERNAME, PASSWORD))
-    if r.status_code == 200:
-        sff_topo[sff_name] = request.get_json()['service-function-forwarder'][0]
-    else:
-        print ("=>Failed to GET SFF from ODL \n")
-
-
-def main(argv):
-    global ODLIP
-    try:
-        logging.basicConfig(level=logging.INFO)
-        opt, args = getopt.getopt(argv, "hrc", ["help", "rest", "cli", "odl-get-sff", "odl-ip-port="])
-    except getopt.GetoptError:
-        print 'rest2ovs --help | --rest | --cli | --odl-get-sff | --odl-ip-port'
-        sys.exit(2)
-
-    odl_get_sff = False
-    rest = False
-    for opt, arg in opt:
-        if opt == "--odl-get-sff":
-            odl_get_sff = True
-            continue
-
-        if opt == "--odl-ip-port":
-            ODLIP = arg
-            continue
-
-        if opt in ('-h', '--help'):
-            print 'rest2ovs -m cli | rest --odl-get-sff --odl-ip-port'
-            sys.exit()
-
-        if opt in ('-c', '--cli'):
-            cli()
-            sys.exit()
-
-        if opt in ('-r', '--rest'):
-            rest = True
-
-    if odl_get_sff:
-        get_sffs_from_odl(ODLIP)
-
-    if rest:
-        app.debug = True
-        app.run(host='0.0.0.0')
-
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
diff --git a/sfc-py/unused/odl2ovs_cli.py b/sfc-py/unused/odl2ovs_cli.py
deleted file mode 100644 (file)
index 7311510..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-# Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
-#
-# This program and the accompanying materials are made available under the
-# terms of the Eclipse Public License v1.0 which accompanies this distribution,
-# and is available at http://www.eclipse.org/legal/epl-v10.html
-
-import subprocess
-
-
-__author__ = "Paul Quinn, Reinaldo Penno"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__version__ = "0.2"
-__email__ = "paulq@cisco.com, rapenno@gmail.com"
-__status__ = "alpha"
-
-
-def cli_local(sfdpinfo, vxlan, key):
-    sfi = 0
-    if len(sfdpinfo) > 1:
-        for sf in sfdpinfo:
-            part1 = 'ovs-vsctl add-port br-int ' + 'vxlan' + str(vxlan) + ' -- set interface vxlan' + str(vxlan) + \
-                    ' options:dst_port=6633 type=vxlan options:remote_ip=' + sfdpinfo[sfi]['locator']
-            part2 = ' options:key=' + str(key) + ' options:nsp=' + str(sfdpinfo[sfi]['pid']) + ' options:nsi=' + \
-                    str(sfdpinfo[sfi]['index'])
-            cli = part1 + part2
-            print cli
-            # subprocess.call([cli], shell=True)
-            vxlan += 1
-            sfi += 1
-    else:
-        print "No locally attached services on SFF"
-    return vxlan
-
-
-def ovs_cli_local(sfdpinfo, vxlan, key):
-    sfi = 0
-    if len(sfdpinfo) > 1:
-        for sf in sfdpinfo:
-            part1 = 'ovs-vsctl add-port br-int ' + 'vxlan' + str(vxlan) + '-- set interface vxlan' + str(vxlan) + \
-                    ' options:dst_port=6633 type=vxlan options:remote_ip=' + sfdpinfo[sfi]['locator']
-            part2 = ' options:key=' + str(key) + ' options:nsp=' + str(sfdpinfo[sfi]['pid']) + ' options:nsi=' + \
-                    str(sfdpinfo[sfi]['index'])
-            cli = part1 + part2
-            subprocess.call([cli], shell=True)
-            vxlan += 1
-            sfi += 1
-            print cli
-    else:
-        print "No locally attached services on SFF"
-    return vxlan
-
-
-def cli_nextsff(nextsffloc, nextsff, key, vxlan, pid):
-    part1 = 'ovs-vsctl add-port br-tun ' + 'vxlan' + str(vxlan) + ' -- set interface vxlan' + str(vxlan) + \
-            ' options:dst_port=6633 type=vxlan options:remote_ip=' + nextsffloc
-    part2 = ' options:key=' + str(key) + ' options:nsp=' + str(pid) + ' options:nsi=' + \
-            str(nextsff['sff-index'])
-    cli = part1 + part2
-    print cli
-    # subprocess.call([cli], shell=True)
-    vxlan += 1
-    return
-
-
-def ovs_cli_nextsff(nextsffloc, nextsff, key, vxlan, path):
-    part1 = 'ovs-vsctl add-port br-tun ' + 'vxlan' + str(vxlan) + ' -- set interface vxlan' + str(vxlan) + \
-            ' options:dst_port=6633 type=vxlan options:remote_ip=' + nextsffloc
-    part2 = ' options:key=' + str(key) + ' options:nsp=' + str(path) + ' options:nsi=' + str(nextsff['sff-index'])
-    cli = part1 + part2
-
-    print cli
-    subprocess.call([cli], shell=True)
-
-    vxlan += 1
-    return
diff --git a/sfc-py/unused/pysf_oldnsh.py b/sfc-py/unused/pysf_oldnsh.py
deleted file mode 100644 (file)
index 9c6a60d..0000000
+++ /dev/null
@@ -1,231 +0,0 @@
-#
-# Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
-#
-# This program and the accompanying materials are made available under the
-# terms of the Eclipse Public License v1.0 which accompanies this distribution,
-# and is available at http://www.eclipse.org/legal/epl-v10.html
-
-import argparse
-import asyncio
-import sys
-import struct
-import socket
-import binascii
-from ctypes import *  # noqa
-
-
-__author__ = "Jim Guichard"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__version__ = "0.1"
-__email__ = "jguichar@cisco.com"
-__status__ = "alpha"
-
-"""Network Service Header (NSH) Enabled Service Function"""
-
-try:
-    import signal
-except ImportError:
-    signal = None
-
-
-class BASEHEADER(Structure):
-    _fields_ = [("version", c_ushort, 2),
-                ("flags", c_ushort, 8),
-                ("length", c_ushort, 6),
-                ("next_protocol", c_uint, 16),
-                ("service_path", c_uint, 24),
-                ("service_index", c_uint, 8)]
-
-
-class CONTEXTHEADER(Structure):
-    _fields_ = [("network_platform", c_uint),
-                ("network_shared", c_uint),
-                ("service_platform", c_uint),
-                ("service_shared", c_uint)]
-
-# Decode base NSH header and context headers
-
-base_values = BASEHEADER()
-ctx_values = CONTEXTHEADER()
-
-
-class MyFwService:
-
-    def connection_made(self, transport):
-        self.transport = transport
-
-    def datagram_received(self, data, addr):
-        print('\nfw service received packet from SFF:\n', addr, binascii.hexlify(data))
-        rw_data = process_incoming_packet(data)
-        self.transport.sendto(rw_data, addr)
-        loop.stop()
-
-    def connection_refused(self, exc):
-        print('Connection refused:', exc)
-
-    def connection_lost(self, exc):
-        print('closing transport', exc)
-        loop = asyncio.get_event_loop()
-        loop.stop()
-
-
-class MyNatService:
-
-    def connection_made(self, transport):
-        self.transport = transport
-
-    def datagram_received(self, data, addr):
-        print('\nnat service received packet from SFF:\n', addr, binascii.hexlify(data))
-        print('\n')
-        rw_data = process_incoming_packet(data)
-        self.transport.sendto(rw_data, addr)
-        loop.stop()
-
-    def connection_refused(self, exc):
-        print('Connection refused:', exc)
-
-    def connection_lost(self, exc):
-        print('closing transport', exc)
-        loop = asyncio.get_event_loop()
-        loop.stop()
-
-
-class MyDpiService:
-
-    def connection_made(self, transport):
-        self.transport = transport
-
-    def datagram_received(self, data, addr):
-        print('\ndpi service received packet from SFF:\n', addr, binascii.hexlify(data))
-        print('\n')
-        rw_data = process_incoming_packet(data)
-        self.transport.sendto(rw_data, addr)
-        loop.stop()
-
-    def connection_refused(self, exc):
-        print('Connection refused:', exc)
-
-    def connection_lost(self, exc):
-        print('closing transport', exc)
-        loop = asyncio.get_event_loop()
-        loop.stop()
-
-
-def process_incoming_packet(data):
-    print('Processing received packet')
-    rw_data = bytearray(data)
-    decode_baseheader(data)
-    decode_contextheader(data)
-    base_values.service_index -= 1
-    set_service_index(rw_data, base_values.service_index)
-    return(rw_data)
-
-
-def decode_baseheader(payload):
-    # Base Service header
-    # base_header = payload[8:17] #starts at offset 8 of payload
-    base_header = payload[7:16]
-
-    start_idx, base_values.md_type, base_values.next_protocol, path_idx = struct.unpack('!H B B I', base_header)
-
-    base_values.version = start_idx >> 14
-    base_values.flags = start_idx >> 6
-    base_values.length = start_idx >> 0
-    base_values.service_path = path_idx >> 8
-    base_values.service_index = path_idx & 0x000000FF
-
-    if __debug__ is False:
-        print ("\nBase NSH Header Decode:")
-        print (binascii.hexlify(base_header))
-        # print ('NSH Version:', base_values.version)
-        # print ('NSH base header flags:', base_values.flags)
-        # print ('NSH base header length:', base_values.length)
-        # print ('NSH MD-type:', base_values.md_type)
-        # print ('NSH base header next protocol:', base_values.next_protocol)
-        print ('Service Path Identifier:', base_values.service_path)
-        print ('Service Index:', base_values.service_index)
-
-# Decode the NSH context headers for a received packet at this SFF.
-
-
-def decode_contextheader(payload):
-    # Context header
-    context_header = payload[16:32]
-
-    ctx_values.network_platform, ctx_values.network_shared, ctx_values.service_platform, \
-        ctx_values.service_shared = struct.unpack('!I I I I', context_header)
-
-    if __debug__ is False:
-        print ("\nNSH Context Header Decode:")
-        print (binascii.hexlify(context_header))
-        print ('Network Platform Context:', ctx_values.network_platform)
-        print ('Network Shared Context:', ctx_values.network_shared)
-        print ('Service Platform Context:', ctx_values.service_platform)
-        print ('Service Shared Context:', ctx_values.service_shared)
-
-
-def set_service_index(rw_data, service_index):
-    rw_data[15] = service_index
-
-
-def start_server(loop, addr, service, myip):
-    t = asyncio.Task(loop.create_datagram_endpoint(
-        service, local_addr=(myip, 6633)))
-    loop.run_until_complete(t)
-    print('Connection made with SFF:', addr)
-    print('Listening for packets on:', myip)
-
-
-def find_service(service):
-    if service == 'fw':
-        return(MyFwService)
-    elif service == 'nat':
-        return(MyNatService)
-    elif service == 'dpi':
-        return(MyDpiService)
-
-
-def get_service_ip():
-    # Let's find a local IP address to use as the source IP of client generated packets
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    try:
-        s.connect(('8.8.8.8', 80))
-        client = (s.getsockname()[0])
-    except socket.error:
-        client = "Unknown IP"
-    finally:
-        s.close()
-    return client
-
-ARGS = argparse.ArgumentParser(description="NSH Service Function")
-ARGS.add_argument('--type', action="store", dest='type',
-                  default=False, help='Run service function. Options: fw, nat, dpi')
-ARGS.add_argument('--host', action="store", dest='host',
-                  default='127.0.0.1', help='SFF host name')
-ARGS.add_argument('--port', action="store", dest='port',
-                  default=4789, type=int, help='SFF port number')
-
-if __name__ == '__main__':
-    args = ARGS.parse_args()
-    if ':' in args.host:
-        args.host, port = args.host.split(':', 1)
-        args.port = int(port)
-
-    if not args.type:
-        print('Please specify --type\n')
-        ARGS.print_help()
-    else:
-        loop = asyncio.get_event_loop()
-        if signal is not None:
-            loop.add_signal_handler(signal.SIGINT, loop.stop)
-
-        if '--type' in sys.argv:
-            # local_ip = get_service_ip()
-            local_ip = "10.1.1.4"
-            service = find_service(args.type)
-            print('Starting', args.type, 'service...')
-            start_server(loop, (args.host, args.port), service, local_ip)
-        else:
-            print('something went wrong')
-
-        loop.run_forever()
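The base-header decode in the removed pysf_oldnsh.py slices nine bytes (payload[7:16]) for the eight-byte '!H B B I' format and extracts the flags and length fields by shifting without masking. A minimal corrected sketch of the same decode, assuming, as the original comment states, that the NSH base header starts at offset 8 behind the VXLAN-GPE header (the function name is illustrative):

    import struct

    def decode_nsh_base_header(payload):
        # First 16-bit word: 2 bits version, 8 bits flags, 6 bits length.
        # Then MD type, next protocol, and a 32-bit word holding the
        # 24-bit service path identifier and the 8-bit service index.
        word0, md_type, next_proto, path_word = struct.unpack('!H B B I', payload[8:16])
        return {
            'version': word0 >> 14,
            'flags': (word0 >> 6) & 0xFF,
            'length': word0 & 0x3F,
            'md_type': md_type,
            'next_protocol': next_proto,
            'service_path': path_word >> 8,
            'service_index': path_word & 0xFF,
        }
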
diff --git a/sfc-py/unused/service_classifier.py b/sfc-py/unused/service_classifier.py
deleted file mode 100644 (file)
index 9cf297e..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
-#
-# This program and the accompanying materials are made available under the
-# terms of the Eclipse Public License v1.0 which accompanies this distribution,
-# and is available at http://www.eclipse.org/legal/epl-v10.html
-
-import struct
-import socket
-import logging
-import binascii
-
-from netfilterqueue import NetfilterQueue
-
-from nsh.encode import build_packet
-from nsh.common import VXLANGPE, BASEHEADER, CONTEXTHEADER
-
-
-__author__ = "Jim Guichard"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__version__ = "0.1"
-__email__ = "jguichar@cisco.com"
-__status__ = "alpha"
-
-
-"""
-Service Classifier
-"""
-
-logger = logging.getLogger(__name__)
-
-
-#: constants
-classify_map = {"172.16.6.140": {"sff": "172.16.6.141", "port": "4789"}}
-
-vxlan_values = VXLANGPE(int('00000100', 2), 0, 0x894F,
-                        int('111111111111111111111111', 2), 64)
-ctx_values = CONTEXTHEADER(0xffffffff, 0, 0xffffffff, 0)
-base_values = BASEHEADER(0x1, int('01000000', 2), 0x6, 0x1, 0x1, 0x000001, 0x4)
-
-# Testing: Setup linux with:
-# iptables -I INPUT -d 172.16.6.140 -j NFQUEUE --queue-num 1
-#
-#
-
-
-def process_and_accept(packet):
-    packet.accept()
-    data = packet.get_payload()
-    address = int(binascii.hexlify(data[16:20]), 16)
-    lookup = socket.inet_ntoa(struct.pack(">I", address))
-
-    try:
-        if classify_map[lookup]['sff'] != '':
-            packet = build_packet(vxlan_values, base_values, ctx_values) + data
-
-            logger.info(binascii.hexlify(data))
-            logger.info(binascii.hexlify(packet))
-
-            UDP_IP = classify_map[lookup]['sff']
-            UDP_PORT = int(classify_map[lookup]['port'])
-            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-
-            try:
-                sock.sendto(packet, (UDP_IP, UDP_PORT))
-
-                if __debug__ is False:
-                    logger.debug('Sending NSH encapsulated packet to SFF: %s',
-                                 UDP_IP)
-
-            except socket.error as exc:
-                logger.exception('Socket Error: %s', exc)
-
-            finally:
-                sock.close()
-
-    except KeyError as exc:
-        logger.exception('Classification failed: %s', exc)
-
-nfqueue = NetfilterQueue()
-nfqueue.bind(1, process_and_accept)
-
-try:
-    nfqueue.run()
-except KeyboardInterrupt:
-    print()
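The removed service_classifier.py recovers the packet's destination address by hexlifying bytes 16-19 of the IPv4 header and repacking them through struct; socket.inet_ntoa accepts that four-byte slice directly. A minimal sketch of the lookup step (the function name is illustrative; the classify_map shape is the one the deleted file used):

    import socket

    def lookup_sff(ip_packet, classify_map):
        # Bytes 16-19 of an IPv4 header carry the destination address.
        dst = socket.inet_ntoa(ip_packet[16:20])
        entry = classify_map.get(dst)
        if entry and entry['sff']:
            return entry['sff'], int(entry['port'])
        return None
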
diff --git a/sfc-py/unused/service_function.py b/sfc-py/unused/service_function.py
deleted file mode 100644 (file)
index 2a0bf32..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-#
-# Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
-#
-# This program and the accompanying materials are made available under the
-# terms of the Eclipse Public License v1.0 which accompanies this distribution,
-# and is available at http://www.eclipse.org/legal/epl-v10.html
-
-import argparse
-import asyncio
-import sys
-
-from nsh.decode import *  # noqa
-from nsh.service_index import *  # noqa
-from nsh.common import *  # noqa
-
-try:
-    import signal
-except ImportError:
-    signal = None
-
-
-__author__ = "Jim Guichard"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__version__ = "0.1"
-__email__ = "jguichar@cisco.com"
-__status__ = "alpha"
-
-
-"""Network Service Header (NSH) Enabled Service Function"""
-
-# Decode vxlan-gpe, base NSH header and NSH context headers
-server_vxlan_values = VXLANGPE()
-server_ctx_values = CONTEXTHEADER()
-server_base_values = BASEHEADER()
-
-
-class MyFwService:
-    def connection_made(self, transport):
-        self.transport = transport
-
-    def datagram_received(self, data, addr):
-        print('\nfw service received packet from SFF:\n', addr, binascii.hexlify(data))
-        rw_data = process_incoming_packet(data)
-        print("Sending packets to", addr)
-        self.transport.sendto(rw_data, addr)
-        # loop.stop()
-
-    def connection_refused(self, exc):
-        print('Connection refused:', exc)
-
-    def connection_lost(self, exc):
-        print('closing transport', exc)
-        loop = asyncio.get_event_loop()
-        loop.stop()
-
-    def __init__(self, loop):
-        self.transport = None
-        self.loop = loop
-
-
-class MyNatService:
-    def connection_made(self, transport):
-        self.transport = transport
-
-    def datagram_received(self, data, addr):
-        print('\nnat service received packet from SFF:\n', addr, binascii.hexlify(data))
-        print('\n')
-        rw_data = process_incoming_packet(data)
-        self.transport.sendto(rw_data, addr)
-        # loop.stop()
-
-    def connection_refused(self, exc):
-        print('Connection refused:', exc)
-
-    def connection_lost(self, exc):
-        print('closing transport', exc)
-        loop = asyncio.get_event_loop()
-        loop.stop()
-
-
-class MyDpiService:
-    def connection_made(self, transport):
-        self.transport = transport
-
-    def datagram_received(self, data, addr):
-        print('\ndpi service received packet from SFF:\n', addr, binascii.hexlify(data))
-        print('\n')
-        rw_data = process_incoming_packet(data)
-        self.transport.sendto(rw_data, addr)
-        # loop.stop()
-
-    def connection_refused(self, exc):
-        print('Connection refused:', exc)
-
-    def connection_lost(self, exc):
-        print('closing transport', exc)
-        loop = asyncio.get_event_loop()
-        loop.stop()
-
-
-def process_incoming_packet(data):
-    print('Processing received packet')
-    rw_data = bytearray(data)
-    decode_vxlan(data, server_vxlan_values)  # decode vxlan-gpe header
-    decode_baseheader(data, server_base_values)  # decode NSH base header
-    decode_contextheader(data, server_ctx_values)  # decode NSH context headers
-    rw_data, si_result = process_service_index(rw_data, server_base_values)
-    return rw_data
-
-
-def set_service_index(rw_data, service_index):
-    rw_data[15] = service_index
-
-
-# This does not work in MacOS when SFF/SF are different python
-# applications on the same machine
-# def start_server(loop, addr, service, myip):
-#     t = asyncio.Task(loop.create_datagram_endpoint(
-#         service, local_addr=(myip, 57444), remote_addr=addr))
-#     loop.run_until_complete(t)
-#     print('Listening for packet on:', addr)
-
-
-def start_server(loop, addr, udpserver, message):
-    listen = loop.create_datagram_endpoint(lambda: udpserver, local_addr=addr)
-    transport, protocol = loop.run_until_complete(listen)
-    print(message, addr)
-    return transport
-
-
-def find_service(service):
-    if service == 'fw':
-        return MyFwService
-    elif service == 'nat':
-        return MyNatService
-    elif service == 'dpi':
-        return MyDpiService
-
-
-ARGS = argparse.ArgumentParser(description="NSH Service Function")
-ARGS.add_argument(
-    '--type', action="store", dest='type',
-    default=False, help='Run service function. Options: fw, nat, dpi')
-ARGS.add_argument(
-    '--host', action="store", dest='host',
-    default='0.0.0.0', help='SFF host name')
-ARGS.add_argument(
-    '--port', action="store", dest='port',
-    default=4789, type=int, help='SFF port number')
-
-
-if __name__ == '__main__':
-    args = ARGS.parse_args()
-    if ':' in args.host:
-        args.host, port = args.host.split(':', 1)
-        args.port = int(port)
-
-    if not args.type:
-        print('Please specify --type\n')
-        ARGS.print_help()
-    else:
-        loop = asyncio.get_event_loop()
-        if signal is not None:
-            loop.add_signal_handler(signal.SIGINT, loop.stop)
-
-        if '--type' in sys.argv:
-            # local_ip = get_service_ip()
-            local_ip = '0.0.0.0'
-            service = find_service(args.type)
-            print('Starting', args.type, 'service...')
-            udpserver = service(loop)
-            start_server(loop, (args.host, args.port), udpserver, "Starting new server...")
-        else:
-            print('something went wrong')
-
-        loop.run_forever()
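The three service classes in the removed service_function.py hand-implement the datagram protocol callbacks and are bound to the loop through a lambda in start_server. A minimal sketch of the same listener pattern built directly on asyncio.DatagramProtocol (class name, address and port are illustrative; a real service function would decode the VXLAN-GPE/NSH headers and decrement the service index before replying, as process_incoming_packet did):

    import asyncio

    class EchoServiceFunction(asyncio.DatagramProtocol):
        def connection_made(self, transport):
            self.transport = transport

        def datagram_received(self, data, addr):
            # A real SF would decode NSH here and adjust the service index.
            self.transport.sendto(bytearray(data), addr)

    loop = asyncio.get_event_loop()
    listen = loop.create_datagram_endpoint(EchoServiceFunction,
                                           local_addr=('0.0.0.0', 4789))
    transport, protocol = loop.run_until_complete(listen)
    loop.run_forever()
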
diff --git a/sfc-py/unused/start_sf.sh b/sfc-py/unused/start_sf.sh
deleted file mode 100755 (executable)
index 4d5061f..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-python3.4 service_function.py --type fw --host 0.0.0.0 --port 40001