This patch must be merged together with the corresponding netvirt patch;
otherwise the genius merge jobs will start failing.
Change-Id: I654fbdf30a7e1b5565d238878a5eca7a96960de9
Signed-off-by: Faseela K <faseela.k@ericsson.com>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <groupId>org.opendaylight.genius</groupId>
+    <artifactId>binding-parent</artifactId>
+    <version>0.8.0-SNAPSHOT</version>
+    <relativePath>../../commons/binding-parent</relativePath>
+  </parent>
+
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>cloudscaler-api</artifactId>
+  <version>0.8.0-SNAPSHOT</version>
+  <name>ODL :: genius :: ${project.artifactId}</name>
+  <packaging>bundle</packaging>
+
+  <!-- API bundle: YANG models and generated bindings consumed by cloudscaler-impl
+       and by external users (e.g. netvirt). Keep implementation deps out of here. -->
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>yang-binding</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.ovsdb</groupId>
+      <artifactId>southbound-api</artifactId>
+      <!-- version managed via the genius.ovsdb.version property of the parent -->
+      <version>${genius.ovsdb.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal.binding.model.ietf</groupId>
+      <artifactId>rfc7223</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>idmanager-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.openflowplugin.model</groupId>
+      <artifactId>model-flow-service</artifactId>
+      <version>${openflowplugin.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-broker-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-net</groupId>
+      <artifactId>commons-net</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>javax.inject</groupId>
+      <artifactId>javax.inject</artifactId>
+    </dependency>
+  </dependencies>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.genius.cloudscaler.api;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Uri;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbBridgeAugmentation;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.ovsdb.bridge.attributes.BridgeExternalIds;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.ovsdb.bridge.attributes.BridgeExternalIdsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.ovsdb.bridge.attributes.BridgeExternalIdsKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+public final class ScaleInConstants {
+
+ public static final TopologyId OVSDB_TOPOLOGY_ID = new TopologyId(new Uri("ovsdb:1"));
+ public static final String TOMBSTONED = "TOMBSTONED";
+ public static final InstanceIdentifier<BridgeExternalIds> BRIDGE_EXTERNAL_IID
+ = InstanceIdentifier.builder(NetworkTopology.class)
+ .child(Topology.class, new TopologyKey(OVSDB_TOPOLOGY_ID))
+ .child(Node.class)
+ .augmentation(OvsdbBridgeAugmentation.class)
+ .child(BridgeExternalIds.class, new BridgeExternalIdsKey(TOMBSTONED)).build();
+
+ private ScaleInConstants() {
+ }
+
+ public static InstanceIdentifier<BridgeExternalIds> buildBridgeExternalIids(NodeId nodeId) {
+ return InstanceIdentifier.builder(NetworkTopology.class)
+ .child(Topology.class, new TopologyKey(OVSDB_TOPOLOGY_ID))
+ .child(Node.class, new NodeKey(nodeId))
+ .augmentation(OvsdbBridgeAugmentation.class)
+ .child(BridgeExternalIds.class, new BridgeExternalIdsKey(TOMBSTONED)).build();
+ }
+
+ public static BridgeExternalIds buildBridgeExternalIds(Boolean tombstone) {
+ return new BridgeExternalIdsBuilder()
+ .setBridgeExternalIdKey(TOMBSTONED)
+ .setBridgeExternalIdValue(tombstone.toString())
+ .build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.genius.cloudscaler.api;
+
+import java.math.BigInteger;
+import java.util.List;
+import java.util.function.Function;
+
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+
+/**
+ * Tracks compute nodes (DPNs) that are tombstoned, i.e. marked for cloud
+ * scale-in, and lets applications query, filter, or react to such nodes.
+ */
+public interface TombstonedNodeManager {
+
+    /**
+     * Tells if the supplied dpn is getting scaled in or not.
+     *
+     * @param dpnId dpn id
+     * @return true if the supplied dpn is getting scaled in
+     * @throws ReadFailedException if the datastore read fails
+     */
+    boolean isDpnTombstoned(BigInteger dpnId) throws ReadFailedException;
+
+    /**
+     * Add the listener callback which will be invoked upon recovery of a scaled-in dpn.
+     *
+     * @param callback callback to be invoked on recovery, keyed by dpn id
+     */
+    void addOnRecoveryCallback(Function<BigInteger, Void> callback);
+
+    /**
+     * Filters the list of dpns, keeping only those which are not scaled in.
+     *
+     * @param dpns the input list of dpns
+     * @return filtered list of dpns which are not scaled in
+     * @throws ReadFailedException if the datastore read fails
+     */
+    List<BigInteger> filterTombStoned(List<BigInteger> dpns) throws ReadFailedException;
+}
--- /dev/null
+// Cloudscaler module: compute-node inventory plus the scale-in orchestration RPCs.
+// Note: the former yang-ext / ietf-inet-types / ietf-yang-types imports were
+// removed because no statement in this module references their prefixes.
+module cloudscaler-rpc {
+  namespace "urn:opendaylight:genius:cloudscaler:rpcs";
+  prefix "cloudscaler";
+
+  revision "2017-12-20" {
+    description "ODL Specific Scalein Rpcs Module";
+  }
+
+  container compute-nodes {
+    description
+      "stores compute node related details (nodeid, dpnid etc ) learned from operational ovsdb node";
+
+    list compute-node {
+      max-elements "unbounded";
+      min-elements "0";
+      key "compute-name";
+      leaf compute-name {
+        type string;
+        description "The name of the compute node";
+      }
+      leaf nodeid {
+        type string;
+        description "ovsdb br-int bridge node id";
+      }
+      leaf dpnid {
+        type uint64;
+        description "datapath node identifier";
+      }
+      leaf tombstoned {
+        type boolean;
+        description "indicates if scalein is started for this node or not";
+      }
+    }
+  }
+
+  rpc scalein-computes-start {
+    description "To trigger start of scale in the given dpns";
+    input {
+      leaf-list scalein-compute-names {
+        type string;
+      }
+    }
+  }
+
+  rpc scalein-computes-tep-delete {
+    description "To delete the tep endpoints of the scaled in dpns";
+    input {
+      leaf-list scalein-compute-names {
+        type string;
+      }
+    }
+  }
+
+  rpc scalein-computes-end {
+    description "To end the scale in of the given dpns output DONE/INPROGRESS";
+    input {
+      leaf-list scalein-compute-names {
+        type string;
+      }
+    }
+    output {
+      leaf status {
+        type string;
+      }
+    }
+  }
+
+  rpc scalein-computes-recover {
+    description "To recover the dpns which are marked for scale in";
+    input {
+      leaf-list recover-compute-names {
+        type string;
+      }
+    }
+  }
+}
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <parent>
+    <groupId>org.opendaylight.genius</groupId>
+    <artifactId>binding-parent</artifactId>
+    <version>0.8.0-SNAPSHOT</version>
+    <relativePath>../../commons/binding-parent</relativePath>
+  </parent>
+
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>cloudscaler-impl</artifactId>
+  <version>0.8.0-SNAPSHOT</version>
+  <name>ODL :: genius :: ${project.artifactId}</name>
+  <packaging>bundle</packaging>
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>cloudscaler-api</artifactId>
+      <version>0.8.0-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>idmanager-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>idmanager-impl</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>arputil-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>itm-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>interfacemanager-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.openflowplugin.model</groupId>
+      <artifactId>model-flow-service</artifactId>
+      <version>${openflowplugin.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>commons-net</groupId>
+      <artifactId>commons-net</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-api-mockito2</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-module-junit4</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>javax.inject</groupId>
+      <artifactId>javax.inject</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>javax.annotation</groupId>
+      <artifactId>javax.annotation-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.infrautils</groupId>
+      <artifactId>caches-api</artifactId>
+      <version>${genius.infrautils.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>testutils</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>mdsalutil-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>mdsalutil-impl</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-broker-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-test-utils</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.genius</groupId>
+      <artifactId>lockmanager-impl</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.openflowplugin</groupId>
+      <artifactId>openflowplugin-extension-nicira</artifactId>
+      <version>${openflowplugin.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.openflowplugin.model</groupId>
+      <artifactId>model-flow-base</artifactId>
+      <version>${openflowplugin.version}</version>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <!-- We purposely don't export any packages to avoid any dependencies
+                 on this bundle and prevent @Singleton annotated classes from being
+                 accidentally included in another bundle's blueprint XML -->
+            <Export-Package />
+          </instructions>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.aries.blueprint</groupId>
+        <artifactId>blueprint-maven-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.eclipse.xtend</groupId>
+        <artifactId>xtend-maven-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.genius.cloudscaler.rpcservice;
+
+import com.google.common.base.Optional;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+
+import java.math.BigInteger;
+import java.util.concurrent.TimeUnit;
+import javax.annotation.PostConstruct;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.genius.datastoreutils.AsyncClusteredDataTreeChangeListenerBase;
+import org.opendaylight.genius.infra.ManagedNewTransactionRunner;
+import org.opendaylight.genius.infra.ManagedNewTransactionRunnerImpl;
+import org.opendaylight.infrautils.utils.concurrent.ListenableFutures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.CloudscalerRpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesEndInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesEndOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesEndOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesRecoverInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesRecoverOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesStartInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesStartOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesTepDeleteInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ScaleinComputesTepDeleteOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.compute.nodes.ComputeNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.compute.nodes.ComputeNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.itm.rev160406.TransportZones;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.itm.rev160406.transport.zones.TransportZone;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.itm.rev160406.transport.zones.transport.zone.Vteps;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Singleton
+public class CloudscalerRpcServiceImpl implements CloudscalerRpcService {
+
+    // NOTE(review): deliberately logs to the shared genius event logger rather
+    // than a per-class logger — confirm this matches project convention.
+    private static final Logger LOG = LoggerFactory.getLogger("GeniusEventLogger");
+
+    // Seconds to wait after a compute's tep delete before scalein-end is allowed
+    // to remove its inventory/topology config; tunable via system property.
+    private static final Integer DELETE_DELAY = Integer.getInteger(
+            "scale.in.end.delay.inventory.delete.in.secs", 120);
+
+    // Pre-built immutable RPC outputs, shared across invocations (made final).
+    private static final ScaleinComputesEndOutput IN_PROGRESS = new ScaleinComputesEndOutputBuilder()
+            .setStatus("INPROGRESS")
+            .build();
+
+    private static final ScaleinComputesEndOutput DONE = new ScaleinComputesEndOutputBuilder()
+            .setStatus("DONE")
+            .build();
+
+    private static final RpcResult<ScaleinComputesEndOutput> IN_PROGRESS_RPC_RESPONSE = RpcResultBuilder
+            .<ScaleinComputesEndOutput>success().withResult(IN_PROGRESS).build();
+
+    private static final RpcResult<ScaleinComputesEndOutput> DONE_RPC_RESPONSE = RpcResultBuilder
+            .<ScaleinComputesEndOutput>success().withResult(DONE).build();
+
+    private final DataBroker dataBroker;
+    private final ComputeNodeManager computeNodeManager;
+    private final ManagedNewTransactionRunner txRunner;
+    private final ItmTepClusteredListener itmTepClusteredListener;
+
+    //The following timestamp is not persisted across reboots
+    //upon reboot the timestamp will have a default value of that system timestamp
+    //this way scalein end that is triggered after cluster reboot will still honour the 2 min delay
+    private final LoadingCache<BigInteger, Long> tepDeleteTimeStamp = CacheBuilder.newBuilder()
+            .expireAfterWrite(60, TimeUnit.MINUTES)
+            .build(new CacheLoader<BigInteger, Long>() {
+                @Override
+                public Long load(BigInteger dpnId) {
+                    // first lookup after (re)boot defaults to "now"
+                    return System.currentTimeMillis();
+                }
+            });
+
+    /** Generic callback that just logs the outcome of a datastore operation. */
+    public static final FutureCallback<Void> DEFAULT_CALLBACK = new FutureCallback<Void>() {
+        @Override
+        public void onSuccess(Void result) {
+            LOG.debug("Success in Datastore operation");
+        }
+
+        @Override
+        public void onFailure(Throwable error) {
+            LOG.error("Error in Datastore operation", error);
+        }
+    };
+
+    @Inject
+    public CloudscalerRpcServiceImpl(DataBroker dataBroker,
+                                     ComputeNodeManager computeNodeManager) {
+        this.dataBroker = dataBroker;
+        this.computeNodeManager = computeNodeManager;
+        this.txRunner = new ManagedNewTransactionRunnerImpl(dataBroker);
+        this.itmTepClusteredListener = new ItmTepClusteredListener(dataBroker);
+    }
+
+    @PostConstruct
+    public void init() {
+        // Watch config itm teps so tep deletions stamp tepDeleteTimeStamp.
+        itmTepClusteredListener.registerListener(LogicalDatastoreType.CONFIGURATION, dataBroker);
+    }
+
+    /**
+     * Marks the given computes as tombstoned so applications stop programming them.
+     * Fails the RPC if the single tombstoning transaction cannot be committed.
+     */
+    @Override
+    public ListenableFuture<RpcResult<ScaleinComputesStartOutput>> scaleinComputesStart(
+            ScaleinComputesStartInput input) {
+        ReadWriteTransaction tx = this.dataBroker.newReadWriteTransaction();
+        SettableFuture<RpcResult<ScaleinComputesStartOutput>> ft = SettableFuture.create();
+        input.getScaleinComputeNames().forEach(s -> tombstoneTheNode(s, tx, true));
+        input.getScaleinComputeNames().forEach(s -> LOG.info("Cloudscaler scalein-start {}", s));
+        try {
+            tx.submit().checkedGet();
+        } catch (TransactionCommitFailedException e) {
+            LOG.error("Failed to tombstone all the nodes ", e);
+            ft.set(RpcResultBuilder.<ScaleinComputesStartOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+                    "Failed to tombstone all the nodes " + e.getMessage()).build());
+            return ft;
+        }
+        ft.set(RpcResultBuilder.<ScaleinComputesStartOutput>success().build());
+        return ft;
+    }
+
+    /**
+     * Clears the tombstone flag on the given computes, returning them to service.
+     */
+    @Override
+    public ListenableFuture<RpcResult<ScaleinComputesRecoverOutput>> scaleinComputesRecover(
+            ScaleinComputesRecoverInput input) {
+        ReadWriteTransaction tx = this.dataBroker.newReadWriteTransaction();
+        SettableFuture<RpcResult<ScaleinComputesRecoverOutput>> ft = SettableFuture.create();
+        input.getRecoverComputeNames().forEach(s -> tombstoneTheNode(s, tx, false));
+        input.getRecoverComputeNames().forEach(s -> LOG.info("Cloudscaler scalein-recover {}", s));
+        try {
+            tx.submit().checkedGet();
+        } catch (TransactionCommitFailedException e) {
+            LOG.error("Failed to recover all the nodes ", e);
+            ft.set(RpcResultBuilder.<ScaleinComputesRecoverOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+                    "Failed to recover all the nodes " + e.getMessage()).build());
+            return ft;
+        }
+        ft.set(RpcResultBuilder.<ScaleinComputesRecoverOutput>success().build());
+        return ft;
+    }
+
+    /**
+     * Finalizes scale-in: returns INPROGRESS until DELETE_DELAY seconds have
+     * elapsed since the compute's tep was deleted, then deletes its config
+     * state via {@link #scaleinComputesEnd2} and returns DONE. A compute that
+     * is already gone is treated as DONE (idempotent).
+     */
+    @Override
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    public ListenableFuture<RpcResult<ScaleinComputesEndOutput>> scaleinComputesEnd(ScaleinComputesEndInput input) {
+        // Was LOG.error: this is a normal entry trace, not an error.
+        LOG.info("Cloudscaler scalein-end {}", input);
+        try {
+            for (String computeName : input.getScaleinComputeNames()) {
+                ComputeNode computeNode = computeNodeManager.getComputeNodeFromName(computeName);
+                if (computeNode == null) {
+                    LOG.warn("Cloudscaler Failed to find the compute {} for scale in end ", computeName);
+                    return Futures.immediateFuture(DONE_RPC_RESPONSE);
+                }
+                Long tepDeletedTimeStamp = tepDeleteTimeStamp.get(computeNode.getDpnid());
+                Long currentTime = System.currentTimeMillis();
+                if ((currentTime - tepDeletedTimeStamp) > DELETE_DELAY * 1000L) {
+                    scaleinComputesEnd2(input);
+                } else {
+                    return Futures.immediateFuture(IN_PROGRESS_RPC_RESPONSE);
+                }
+            }
+        } catch (Exception e) {
+            LOG.error("Cloudscaler Failed scalein-end ", e);
+            return Futures.immediateFuture(
+                    RpcResultBuilder.<ScaleinComputesEndOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+                            "Failed to read the compute node " + e.getMessage()).build());
+        }
+        return Futures.immediateFuture(DONE_RPC_RESPONSE);
+    }
+
+    /**
+     * Deletes the compute-node entry, the openflow inventory node and the ovsdb
+     * topology node of each named compute. Each delete runs in its own
+     * transaction with error logging; failures of one do not block the others.
+     */
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    public ListenableFuture<RpcResult<ScaleinComputesEndOutput>> scaleinComputesEnd2(ScaleinComputesEndInput input) {
+        try {
+            for (String computeName : input.getScaleinComputeNames()) {
+                ComputeNode computeNode;
+                try {
+                    computeNode = computeNodeManager.getComputeNodeFromName(computeName);
+                    if (computeNode == null) {
+                        LOG.error("Cloudscaler Failed to find the compute {} for scale in end ",
+                                computeName);
+                        return Futures.immediateFuture(IN_PROGRESS_RPC_RESPONSE);
+                    }
+                } catch (ReadFailedException e) {
+                    LOG.error("Cloudscaler Failed to read the compute node {}", e.getMessage());
+                    return Futures.immediateFuture(
+                            RpcResultBuilder.<ScaleinComputesEndOutput>failed().withError(
+                                    RpcError.ErrorType.APPLICATION,
+                                    "Failed to read the compute node " + e.getMessage()).build());
+                }
+                LOG.info("Cloudscaler Deleting compute node details {}", computeNode);
+                LOG.info("Cloudscaler Deleting compute node details {}",
+                        buildOpenflowNodeIid(computeNode));
+                LOG.info("Cloudscaler Deleting compute node details {}", buildOvsdbNodeId(computeNode));
+                ListenableFutures.addErrorLogging(txRunner.callWithNewReadWriteTransactionAndSubmit(tx -> {
+                    computeNodeManager.deleteComputeNode(tx, computeNode);
+                }), LOG, "Cloudscaler Failed to delete the compute node");
+                ListenableFutures.addErrorLogging(txRunner.callWithNewReadWriteTransactionAndSubmit(tx -> {
+                    tx.delete(LogicalDatastoreType.CONFIGURATION, buildOpenflowNodeIid(computeNode));
+                }), LOG, "Cloudscaler Failed to delete the config inventory");
+                ListenableFutures.addErrorLogging(txRunner.callWithNewReadWriteTransactionAndSubmit(tx -> {
+                    tx.delete(LogicalDatastoreType.CONFIGURATION, buildOvsdbNodeId(computeNode));
+                }), LOG, "Cloudscaler Failed to delete the config topology");
+            }
+        } catch (Throwable e) {
+            LOG.error("Cloudscaler Failed to do scale in end {} ", input, e);
+            return Futures.immediateFuture(
+                    RpcResultBuilder.<ScaleinComputesEndOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+                            "Failed to read the transport zone " + e.getMessage()).build());
+        }
+        return Futures.immediateFuture(DONE_RPC_RESPONSE);
+    }
+
+    /** Builds the ovsdb ("ovsdb:1") topology node path for the compute's bridge node id. */
+    private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network
+            .topology.rev131021.network.topology.topology.Node> buildOvsdbNodeId(ComputeNode computeNode) {
+        return InstanceIdentifier
+                .create(NetworkTopology.class)
+                .child(Topology.class, new TopologyKey(new TopologyId("ovsdb:1")))
+                .child(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network
+                                .topology.topology.Node.class,
+                        new org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021
+                                .network.topology.topology.NodeKey(new NodeId(computeNode.getNodeid())));
+    }
+
+    /** Builds the openflow inventory node path ("openflow:&lt;dpnid&gt;") for the compute. */
+    private InstanceIdentifier<Node> buildOpenflowNodeIid(ComputeNode computeNode) {
+        return InstanceIdentifier.builder(Nodes.class)
+                .child(Node.class, new NodeKey(
+                        new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId(
+                                "openflow:" + computeNode.getDpnid().toString()))).build();
+    }
+
+    /**
+     * Writes the compute node back with the tombstoned flag set to the given
+     * value, within the caller's transaction. Missing or unreadable computes
+     * are logged and skipped.
+     */
+    private void tombstoneTheNode(String computeName, ReadWriteTransaction tx, Boolean tombstone) {
+        ComputeNode computeNode = null;
+        try {
+            computeNode = computeNodeManager.getComputeNodeFromName(computeName);
+            if (computeNode == null) { //TODO throw error to rpc
+                LOG.error("Cloudscaler Node not present to {} {}",
+                        computeName, tombstone ? "tombstone" : "recover");
+                return;
+            }
+        } catch (ReadFailedException e) {
+            LOG.error("Cloudscaler Failed to {} the compute {} read failed",
+                    tombstone ? "tombstone" : "recover", computeName);
+            return;
+        }
+        ComputeNodeBuilder builder = new ComputeNodeBuilder(computeNode);
+        builder.setTombstoned(tombstone);
+        tx.put(LogicalDatastoreType.CONFIGURATION,
+                computeNodeManager.buildComputeNodeIid(computeName), builder.build(), true);
+    }
+
+    /**
+     * Deletes the itm tep (vtep) entries belonging to each named compute from
+     * every transport zone in the config datastore.
+     */
+    @Override
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    public ListenableFuture<RpcResult<ScaleinComputesTepDeleteOutput>> scaleinComputesTepDelete(
+            ScaleinComputesTepDeleteInput input) {
+        ReadOnlyTransaction readTx = this.dataBroker.newReadOnlyTransaction();
+        SettableFuture<RpcResult<ScaleinComputesTepDeleteOutput>> ft = SettableFuture.create();
+        Optional<TransportZones> tz;
+        try {
+            tz = readTx.read(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(TransportZones.class))
+                    .checkedGet();
+        } catch (ReadFailedException e) {
+            LOG.error("Cloudscaler Failed to read the transport zone {}", e.getMessage());
+            ft.set(RpcResultBuilder.<ScaleinComputesTepDeleteOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+                    "Failed to read the transport zone " + e.getMessage()).build());
+            return ft;
+        } finally {
+            readTx.close();
+        }
+        try {
+            for (String computeName : input.getScaleinComputeNames()) {
+                ComputeNode computeNode = null;
+                try {
+                    computeNode = computeNodeManager.getComputeNodeFromName(computeName);
+                    if (computeNode == null) {
+                        LOG.warn("Cloudscaler Could not find the compute for tep delete {}", computeName);
+                        ft.set(RpcResultBuilder.<ScaleinComputesTepDeleteOutput>success().build());
+                        return ft;
+                    }
+                } catch (ReadFailedException e) {
+                    LOG.error("Cloudscaler Failed to read the compute node {}", e.getMessage());
+                    ft.set(RpcResultBuilder.<ScaleinComputesTepDeleteOutput>failed()
+                            .withError(RpcError.ErrorType.APPLICATION, "Failed to read the compute node "
+                                    + e.getMessage()).build());
+                    return ft;
+                }
+                if (tz.isPresent() && tz.get().getTransportZone() != null) {
+                    for (TransportZone zone : tz.get().getTransportZone()) {
+                        if (zone.getVteps() == null) {
+                            continue;
+                        }
+                        for (Vteps vteps : zone.getVteps()) {
+                            if (vteps.getDpnId().equals(computeNode.getDpnid())) {
+                                InstanceIdentifier<Vteps> dpnVtepIid = InstanceIdentifier
+                                        .create(TransportZones.class)
+                                        .child(TransportZone.class, zone.key())
+                                        .child(Vteps.class, vteps.key());
+                                // Was LOG.error: this is a normal progress trace.
+                                LOG.info("Cloudscaler deleting dpn {}", vteps);
+                                ListenableFutures.addErrorLogging(
+                                        txRunner.callWithNewReadWriteTransactionAndSubmit(tx -> {
+                                            tx.delete(LogicalDatastoreType.CONFIGURATION, dpnVtepIid);
+                                        }), LOG, "Cloudscaler Failed to delete the itm tep");
+                            }
+                        }
+                    }
+                }
+            }
+            // Removed a dead no-op statement here that built an InstanceIdentifier
+            // for TransportZones/TransportZone/Vteps and discarded it.
+        } catch (Throwable e) {
+            LOG.error("Failed to read the transport zone ", e);
+            ft.set(RpcResultBuilder.<ScaleinComputesTepDeleteOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+                    "Failed to read the transport zone " + e.getMessage()).build());
+            return ft;
+        }
+        ft.set(RpcResultBuilder.<ScaleinComputesTepDeleteOutput>success().build());
+        return ft;
+    }
+
+    /**
+     * Clustered listener on config itm vteps; records the deletion time of each
+     * tep so scalein-end can enforce the DELETE_DELAY grace period.
+     */
+    class ItmTepClusteredListener extends AsyncClusteredDataTreeChangeListenerBase<Vteps, ItmTepClusteredListener> {
+
+        @Inject
+        ItmTepClusteredListener(DataBroker dataBroker) {
+            // NOTE(review): the dataBroker parameter is unused here; registration
+            // happens later via registerListener() in init().
+            super(Vteps.class, ItmTepClusteredListener.class);
+        }
+
+        @Override
+        public InstanceIdentifier<Vteps> getWildCardPath() {
+            return InstanceIdentifier.create(TransportZones.class)
+                    .child(TransportZone.class)
+                    .child(Vteps.class);
+        }
+
+        @Override
+        protected void remove(InstanceIdentifier<Vteps> instanceIdentifier, Vteps tep) {
+            // Stamp the deletion time; scalein-end compares against DELETE_DELAY.
+            tepDeleteTimeStamp.put(tep.getDpnId(), System.currentTimeMillis());
+        }
+
+        @Override
+        protected void update(InstanceIdentifier<Vteps> instanceIdentifier, Vteps vteps, Vteps t1) {
+        }
+
+        @Override
+        protected void add(InstanceIdentifier<Vteps> instanceIdentifier, Vteps vteps) {
+        }
+
+        @Override
+        protected ItmTepClusteredListener getDataTreeChangeListener() {
+            return ItmTepClusteredListener.this;
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.genius.cloudscaler.rpcservice;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
+import java.math.BigInteger;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.genius.mdsalutil.cache.InstanceIdDataObjectCache;
+import org.opendaylight.infrautils.caches.CacheProvider;
+import org.opendaylight.infrautils.utils.concurrent.Executors;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ComputeNodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.compute.nodes.ComputeNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.compute.nodes.ComputeNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.compute.nodes.ComputeNodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbBridgeAugmentation;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.ovsdb.bridge.attributes.BridgeOtherConfigs;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tracks the mapping between ovsdb bridge nodes and cloudscaler "compute-node"
+ * entries. Discovers computes from the ovsdb:1 operational topology (compute name
+ * is taken from the bridge "dp-desc" other-config, dpnid from the bridge
+ * datapath-id) and persists them in the config datastore, keeping a local
+ * dpnid-to-compute map for fast lookups.
+ */
+@Singleton
+public class ComputeNodeManager {
+
+    // Shared named event logger used across genius for operational event tracing.
+    private static final Logger LOG = LoggerFactory.getLogger("GeniusEventLogger");
+
+    private final DataBroker dataBroker;
+
+    // Config-datastore cache of compute-node entries; mirrored into dpnIdVsComputeNode.
+    private final InstanceIdDataObjectCache<ComputeNode> computeNodeCache;
+    // Operational cache of ovsdb:1 topology nodes; drives compute discovery via add().
+    private final InstanceIdDataObjectCache<Node> ovsdbTopologyNodeCache;
+    private final Map<BigInteger, ComputeNode> dpnIdVsComputeNode = new ConcurrentHashMap<>();
+    // Single thread serializes ovsdb node-add handling off the listener threads.
+    private final ExecutorService executorService = Executors.newSingleThreadExecutor("compute-node-manager", LOG);
+
+    @Inject
+    @SuppressFBWarnings({"URF_UNREAD_FIELD", "NP_LOAD_OF_KNOWN_NULL_VALUE"})
+    public ComputeNodeManager(DataBroker dataBroker,
+                              CacheProvider cacheProvider) {
+        this.dataBroker = dataBroker;
+        this.computeNodeCache = new InstanceIdDataObjectCache<ComputeNode>(ComputeNode.class, dataBroker,
+                LogicalDatastoreType.CONFIGURATION,
+                InstanceIdentifier.builder(ComputeNodes.class).child(ComputeNode.class).build(),
+                cacheProvider) {
+            @Override
+            protected void added(InstanceIdentifier<ComputeNode> path, ComputeNode computeNode) {
+                LOG.info("ComputeNodeManager add compute {}", computeNode);
+                dpnIdVsComputeNode.put(computeNode.getDpnid(), computeNode);
+            }
+
+            @Override
+            protected void removed(InstanceIdentifier<ComputeNode> path, ComputeNode computeNode) {
+                LOG.info("ComputeNodeManager remove compute {}", computeNode);
+                dpnIdVsComputeNode.remove(computeNode.getDpnid());
+            }
+        };
+        this.ovsdbTopologyNodeCache = new InstanceIdDataObjectCache<Node>(Node.class, dataBroker,
+                LogicalDatastoreType.OPERATIONAL,
+                getWildcardPath(),
+                cacheProvider) {
+            @Override
+            @SuppressWarnings("checkstyle:IllegalCatch")
+            protected void added(InstanceIdentifier<Node> path, Node dataObject) {
+                // Hand off to the single-threaded executor so the datastore listener
+                // thread is never blocked by the synchronous config write in add().
+                executorService.execute(() -> {
+                    try {
+                        add(dataObject);
+                    } catch (Exception e) {
+                        LOG.error("ComputeNodeManager Failed to handle ovsdb node add", e);
+                    }
+                });
+            }
+        };
+    }
+
+    /** Releases both datastore caches and stops the handler thread. */
+    @PreDestroy
+    public void close() {
+        computeNodeCache.close();
+        ovsdbTopologyNodeCache.close();
+        executorService.shutdown();
+    }
+
+    /**
+     * Returns the compute node stored in the config datastore under
+     * {@code computeName}, or null if absent.
+     */
+    public ComputeNode getComputeNodeFromName(String computeName) throws ReadFailedException {
+        InstanceIdentifier<ComputeNode> computeIid = buildComputeNodeIid(computeName);
+        return computeNodeCache.get(computeIid).orNull();
+    }
+
+    /** Queues deletion of the compute-node config entry on the caller's transaction. */
+    public void deleteComputeNode(ReadWriteTransaction tx, ComputeNode computeNode) {
+        tx.delete(LogicalDatastoreType.CONFIGURATION, buildComputeNodeIid(computeNode.getComputeName()));
+    }
+
+    /**
+     * Derives a compute-node entry from an ovsdb bridge node and persists it,
+     * unless a different ovsdb node already claims the same compute name.
+     * Bridges without a "dp-desc" other-config are logged and skipped.
+     */
+    public void add(@NonNull Node node) throws TransactionCommitFailedException {
+        OvsdbBridgeAugmentation bridgeAugmentation = node.augmentation(OvsdbBridgeAugmentation.class);
+        if (bridgeAugmentation == null || bridgeAugmentation.getBridgeOtherConfigs() == null) {
+            return;
+        }
+        BigInteger datapathid = getDpnIdFromBridge(bridgeAugmentation);
+        Optional<BridgeOtherConfigs> otherConfigOptional = bridgeAugmentation.getBridgeOtherConfigs()
+                .stream()
+                .filter(otherConfig -> otherConfig.getBridgeOtherConfigKey().equals("dp-desc"))
+                .findFirst();
+        if (!otherConfigOptional.isPresent()) {
+            LOG.error("ComputeNodeManager Compute node name is not present in bridge {}", node.getNodeId());
+            return;
+        }
+        String computeName = otherConfigOptional.get().getBridgeOtherConfigValue();
+        String nodeId = node.getNodeId().getValue();
+        InstanceIdentifier<ComputeNode> computeIid = buildComputeNodeIid(computeName);
+        ComputeNode computeNode = new ComputeNodeBuilder()
+                .setComputeName(computeName)
+                .setDpnid(datapathid)
+                .setNodeid(nodeId)
+                .build();
+        // The cache returns a Guava Optional (controller md-sal API), hence the
+        // fully-qualified name to avoid clashing with java.util.Optional above.
+        com.google.common.base.Optional<ComputeNode> computeNodeOptional = com.google.common.base.Optional.absent();
+        try {
+            computeNodeOptional = computeNodeCache.get(computeIid);
+        } catch (ReadFailedException e) {
+            // Was previously logged without the exception; keep the cause for diagnosis.
+            LOG.error("ComputeNodeManager Failed to read {}", computeIid, e);
+        }
+        if (computeNodeOptional.isPresent()) {
+            logErrorIfComputeNodeIsAlreadyTaken(datapathid, nodeId, computeNodeOptional);
+        } else {
+            LOG.info("ComputeNodeManager add ovsdb node {}", node.getNodeId());
+            putComputeDetailsInConfigDatastore(computeIid, computeNode);
+        }
+    }
+
+    /** Builds the config-datastore identifier for the compute node named {@code computeName}. */
+    public InstanceIdentifier<ComputeNode> buildComputeNodeIid(String computeName) {
+        return InstanceIdentifier.builder(ComputeNodes.class)
+                .child(ComputeNode.class, new ComputeNodeKey(computeName))
+                .build();
+    }
+
+    // The datapath-id is a ":"-separated hex string; strip separators, parse base 16.
+    private BigInteger getDpnIdFromBridge(OvsdbBridgeAugmentation bridgeAugmentation) {
+        String datapathIdStr = bridgeAugmentation.getDatapathId().getValue().replace(":", "");
+        return new BigInteger(datapathIdStr, 16);
+    }
+
+    /**
+     * Writes the compute node to the config datastore (blocking until committed)
+     * and mirrors it into the local dpnid map.
+     */
+    public void putComputeDetailsInConfigDatastore(InstanceIdentifier<ComputeNode> computeIid,
+                                                   ComputeNode computeNode) throws TransactionCommitFailedException {
+        ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();
+        tx.put(LogicalDatastoreType.CONFIGURATION, computeIid, computeNode);
+        tx.submit().checkedGet();
+        dpnIdVsComputeNode.put(computeNode.getDpnid(), computeNode);
+    }
+
+    // An entry for this compute name already exists: complain unless it matches this node.
+    private void logErrorIfComputeNodeIsAlreadyTaken(BigInteger datapathid, String nodeId,
+                                                     com.google.common.base.Optional<ComputeNode> optional) {
+        ComputeNode existingNode = optional.get();
+        if (!Objects.equals(existingNode.getNodeid(), nodeId)) {
+            LOG.error("ComputeNodeManager Compute is already connected by compute {}", existingNode);
+            return;
+        }
+        if (!Objects.equals(existingNode.getDpnid(), datapathid)) {
+            LOG.error("ComputeNodeManager Compute is already connected by compute {}", existingNode);
+        }
+    }
+
+    // Wildcard over all nodes of the ovsdb:1 operational topology.
+    private InstanceIdentifier<Node> getWildcardPath() {
+        return InstanceIdentifier
+                .create(NetworkTopology.class)
+                .child(Topology.class, new TopologyKey(new TopologyId("ovsdb:1")))
+                .child(Node.class);
+    }
+
+    /** Returns the cached compute node for {@code dpnId}, or null if none is known. */
+    public ComputeNode getComputeNode(BigInteger dpnId) {
+        return dpnIdVsComputeNode.get(dpnId);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.genius.cloudscaler.rpcservice;
+
+import java.math.BigInteger;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.genius.cloudscaler.api.TombstonedNodeManager;
+import org.opendaylight.genius.mdsalutil.cache.InstanceIdDataObjectCache;
+import org.opendaylight.infrautils.caches.CacheProvider;
+import org.opendaylight.infrautils.utils.concurrent.Executors;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.ComputeNodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.genius.cloudscaler.rpcs.rev171220.compute.nodes.ComputeNode;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Singleton
+public class TombstonedNodeManagerImpl implements TombstonedNodeManager {
+
+ private static final Logger LOG = LoggerFactory.getLogger(TombstonedNodeManagerImpl.class);
+
+ private final DataBroker dataBroker;
+ private final CacheProvider cacheProvider;
+ private final Set<Function<BigInteger, Void>> callbacks = ConcurrentHashMap.newKeySet();
+ private final ComputeNodeManager computeNodeManager;
+
+ private InstanceIdDataObjectCache<ComputeNode> computeNodeCache;
+ private ExecutorService executorService = Executors.newSingleThreadExecutor("tombstone-node-manager", LOG);
+
+ @Inject
+ public TombstonedNodeManagerImpl(DataBroker dataBroker,
+ CacheProvider cacheProvider,
+ ComputeNodeManager computeNodeManager) {
+ this.dataBroker = dataBroker;
+ this.cacheProvider = cacheProvider;
+ this.computeNodeManager = computeNodeManager;
+ init();
+ }
+
+ void init() {
+ this.computeNodeCache = new InstanceIdDataObjectCache<ComputeNode>(ComputeNode.class, dataBroker,
+ LogicalDatastoreType.CONFIGURATION,
+ InstanceIdentifier.builder(ComputeNodes.class).child(ComputeNode.class).build(),
+ cacheProvider) {
+ @Override
+ protected void added(InstanceIdentifier<ComputeNode> path, ComputeNode computeNode) {
+ if (computeNode.isTombstoned() != null && !computeNode.isTombstoned()) {
+ executorService.execute(() -> {
+ callbacks.forEach(callback -> {
+ callback.apply(computeNode.getDpnid());
+ });
+ });
+ }
+ }
+ };
+ }
+
+ @PreDestroy
+ void close() {
+ computeNodeCache.close();
+ }
+
+ @Override
+ public boolean isDpnTombstoned(BigInteger dpnId) throws ReadFailedException {
+ if (dpnId == null) {
+ return false;
+ }
+ ComputeNode computeNode = computeNodeManager.getComputeNode(dpnId);
+ if (computeNode != null && computeNode.isTombstoned() != null) {
+ return computeNode.isTombstoned();
+ }
+ return false;
+ }
+
+ @Override
+ public void addOnRecoveryCallback(Function<BigInteger, Void> callback) {
+ callbacks.add(callback);
+ }
+
+ @Override
+ public List<BigInteger> filterTombStoned(List<BigInteger> dpns) throws ReadFailedException {
+ return dpns.stream().filter((dpn) -> {
+ try {
+ return !isDpnTombstoned(dpn);
+ } catch (ReadFailedException e) {
+ LOG.error("Failed to read {}", dpn);
+ return true;
+ }
+ }).collect(Collectors.toList());
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
+ xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
+ odl:use-default-for-reference-types="true">
+
+ <reference id="dataBroker"
+ interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"
+ odl:type="default" />
+
+ <reference id="cacheProvider"
+ interface="org.opendaylight.infrautils.caches.CacheProvider"/>
+
+ <odl:rpc-implementation ref="cloudscalerRpcServiceImpl"/>
+
+ <service ref="tombstonedNodeManagerImpl"
+ interface="org.opendaylight.genius.cloudscaler.api.TombstonedNodeManager" />
+</blueprint>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2019 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <parent>
+ <groupId>org.opendaylight.odlparent</groupId>
+ <artifactId>odlparent-lite</artifactId>
+ <version>5.0.1</version>
+ <relativePath/>
+ </parent>
+
+ <groupId>org.opendaylight.genius</groupId>
+ <artifactId>cloudscaler-aggregator</artifactId>
+ <version>0.8.0-SNAPSHOT</version>
+ <name>ODL :: genius :: ${project.artifactId}</name>
+ <packaging>pom</packaging>
+ <modelVersion>4.0.0</modelVersion>
+ <modules>
+ <module>api</module>
+ <module>impl</module>
+ </modules>
+ <!-- DO NOT install or deploy the repo root pom as it's only needed to initiate a build -->
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
<artifactId>interfacemanager-api</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.genius</groupId>
+ <artifactId>cloudscaler-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.genius</groupId>
<artifactId>itm-api</artifactId>
<artifactId>interfacemanager-impl</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.genius</groupId>
+ <artifactId>cloudscaler-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.genius</groupId>
<artifactId>interfacemanager-shell</artifactId>
<module>commons</module>
<module>interfacemanager</module>
<module>itm</module>
+ <module>cloudscaler</module>
<module>lockmanager</module>
<module>mdsalutil</module>
<module>arputil</module>