try {
startNegotiation();
} catch (Exception e) {
- logger.info("Unexpected negotiation failure", e);
+ logger.warn("Unexpected negotiation failure", e);
negotiationFailed(e);
}
}
}
if (!cf.isSuccess()) {
- LOG.info("Attempt to connect to connect to {} failed", ProtocolSessionPromise.this.address, cf.cause());
+ LOG.warn("Attempt to connect to {} failed", ProtocolSessionPromise.this.address, cf.cause());
final Future<Void> rf = ProtocolSessionPromise.this.strategy.scheduleReconnect(cf.cause());
rf.addListener(new FutureListener<Void>() {
@Override
throws ConflictingVersionException, ValidationException {
final String transactionName = ObjectNameUtil
.getTransactionName(transactionControllerON);
- logger.info("About to commit {}. Current parentVersion: {}, versionCounter {}", transactionName, version, versionCounter);
+ logger.trace("About to commit {}. Current parentVersion: {}, versionCounter {}", transactionName, version, versionCounter);
// find ConfigTransactionController
Map<String, ConfigTransactionControllerInternal> transactions = transactionsHolder.getCurrentTransactions();
private void validate_noLocks() throws ValidationException {
transactionStatus.checkNotAborted();
- logger.info("Validating transaction {}", getTransactionIdentifier());
+ logger.trace("Validating transaction {}", getTransactionIdentifier());
// call validate()
List<ValidationException> collectedExceptions = new ArrayList<>();
for (Entry<ModuleIdentifier, Module> entry : dependencyResolverManager
throw ValidationException
.createFromCollectedValidationExceptions(collectedExceptions);
}
- logger.info("Validated transaction {}", getTransactionIdentifier());
+ logger.trace("Validated transaction {}", getTransactionIdentifier());
}
/**
try {
validate_noLocks();
} catch (ValidationException e) {
- logger.info("Commit failed on validation");
+ logger.trace("Commit failed on validation");
configBeanModificationDisabled.set(false); // recoverable error
throw e;
}
+ "to obtain a lock");
}
- logger.info("Committing transaction {}", getTransactionIdentifier());
+ logger.trace("Committing transaction {}", getTransactionIdentifier());
// call getInstance()
for (Entry<ModuleIdentifier, Module> entry : dependencyResolverManager
// count dependency order
- logger.info("Committed configuration {}", getTransactionIdentifier());
+ logger.trace("Committed configuration {}", getTransactionIdentifier());
transactionStatus.setCommitted();
// unregister this and all modules from jmx
close();
@Override
public void close() {
- logger.info("Destroying {}", identifier);
+ logger.trace("Destroying {}", identifier);
try {
instance.close();
} catch (Exception e) {
logger.warn("Thread dump:{}", sb);
System.exit(1);
} catch (InterruptedException e) {
- logger.info("Interrupted, not going to call System.exit(1)");
+ logger.warn("Interrupted, not going to call System.exit(1)");
}
}
}
pathToFile.mkdirs();
}
if (targetFile.exists() && overwrite == false) {
- logger.info("Skipping {} since it already exists", targetFile);
+ logger.trace("Skipping {} since it already exists", targetFile);
} else {
try (Writer fileWriter = new FileWriter(targetFile)) {
fileWriter.write(entry.getValue());
}
- logger.info("{}: File {} generated successfully",
+ logger.trace("{}: File {} generated successfully",
JMXGenerator.class.getCanonicalName(), targetFile);
result.add(targetFile);
}
updateCache(snapshot);
cache.setInconsistentURLsForReporting(Collections.<URL> emptySet());
- logger.info("Yang store updated to new consistent state containing {} yang files", consistentBundlesToYangURLs.size());
+ logger.trace("Yang store updated to new consistent state containing {} yang files", consistentBundlesToYangURLs.size());
logger.debug("Yang store updated to new consistent state containing {}", consistentBundlesToYangURLs);
}
Status status = configurationAware.saveConfiguration();
if (!status.isSuccess()) {
success = false;
- logger.info("Failed to save config for {}",
+ logger.warn("Failed to save config for {}",
configurationAware.getClass().getName());
}
}
public Status saveConfiguration() {
boolean success = true;
for (IConfigurationContainerAware configurationAware : configurationAwareList) {
- logger.info("Save Config triggered for {}", configurationAware.getClass().getSimpleName());
+ logger.trace("Save Config triggered for {}", configurationAware.getClass().getSimpleName());
Status status = configurationAware.saveConfiguration();
if (!status.isSuccess()) {
success = false;
- logger.info("Failed to save config for {}", configurationAware.getClass().getSimpleName());
+ logger.warn("Failed to save config for {}", configurationAware.getClass().getSimpleName());
}
}
if (success) {
*/
private void notifyContainerModeChange(boolean lastActionDelete, boolean notifyLocal) {
if (lastActionDelete == false && containerConfigs.size() == 1) {
- logger.info("First container Creation. Inform listeners");
+ logger.trace("First container Creation. Inform listeners");
synchronized (this.iContainerListener) {
for (IContainerListener i : this.iContainerListener) {
i.containerModeUpdated(UpdateType.ADDED);
}
}
} else if (lastActionDelete == true && containerConfigs.isEmpty()) {
- logger.info("Last container Deletion. Inform listeners");
+ logger.trace("Last container Deletion. Inform listeners");
synchronized (this.iContainerListener) {
for (IContainerListener i : this.iContainerListener) {
i.containerModeUpdated(UpdateType.REMOVED);
@SuppressWarnings("deprecation")
private void allocateCaches() {
if (this.clusterContainerService == null) {
- log.info("un-initialized clusterContainerService, can't create cache");
+ log.trace("un-initialized clusterContainerService, can't create cache");
return;
}
@SuppressWarnings({ "unchecked", "deprecation" })
private void retrieveCaches() {
if (this.clusterContainerService == null) {
- log.info("un-initialized clusterContainerService, can't retrieve cache");
+ log.warn("un-initialized clusterContainerService, can't retrieve cache");
return;
}
private void notifyStaticRouteUpdate(StaticRoute s, boolean update) {
if (this.staticRoutingAware != null) {
- log.info("Invoking StaticRoutingAware listeners");
+ log.trace("Invoking StaticRoutingAware listeners");
synchronized (this.staticRoutingAware) {
for (IStaticRoutingAware ra : this.staticRoutingAware) {
try {
Status succeeded = null;
boolean decouple = false;
if (installedList.size() != toInstallList.size()) {
- log.info("Modify: New flow entry does not satisfy the same "
+ log.trace("Modify: New flow entry does not satisfy the same "
+ "number of container flows as the original entry does");
decouple = true;
}
*/
FlowEntryInstall sameMatchEntry = installedSwView.get(installEntry);
if (sameMatchEntry != null && !sameMatchEntry.getOriginal().equals(currentFlowEntry)) {
- log.info("Modify: new container flow merged flow entry clashes with existing flow");
+ log.trace("Modify: new container flow merged flow entry clashes with existing flow");
decouple = true;
} else {
toInstallSafe.add(installEntry);
}
Status error = modifyEntry(currentFlowEntry, newFlowEntry, false);
if (error.isSuccess()) {
- log.info("Ports {} added to FlowEntry {}", portList, flowName);
+ log.trace("Ports {} added to FlowEntry {}", portList, flowName);
} else {
log.warn("Failed to add ports {} to Flow entry {}. The failure is: {}", portList,
currentFlowEntry.toString(), error.getDescription());
}
Status status = modifyEntry(currentFlowEntry, newFlowEntry, false);
if (status.isSuccess()) {
- log.info("Ports {} removed from FlowEntry {}", portList, flowName);
+ log.trace("Ports {} removed from FlowEntry {}", portList, flowName);
} else {
log.warn("Failed to remove ports {} from Flow entry {}. The failure is: {}", portList,
currentFlowEntry.toString(), status.getDescription());
Status status = modifyEntry(currentFlowEntry, newFlowEntry, false);
if (status.isSuccess()) {
- log.info("Output port replaced with {} for flow {} on node {}", outPort, flowName, node);
+ log.trace("Output port replaced with {} for flow {} on node {}", outPort, flowName, node);
} else {
log.warn("Failed to replace output port for flow {} on node {}. The failure is: {}", flowName, node,
status.getDescription());
// Do not attempt to reinstall the flow, warn user
if (newFlowConfig.equals(oldFlowConfig)) {
String msg = "No modification detected";
- log.info("Static flow modification skipped. New flow and old flow are the same: {}", newFlowConfig);
+ log.trace("Static flow modification skipped. New flow and old flow are the same: {}", newFlowConfig);
return new Status(StatusCode.SUCCESS, msg);
}
* inactive list
*/
private void uninstallAllFlowEntries(boolean preserveFlowEntries) {
- log.info("Uninstalling all non-internal flows");
+ log.trace("Uninstalling all non-internal flows");
List<FlowEntryInstall> toRemove = new ArrayList<FlowEntryInstall>();
* default container instance of FRM only when the last container is deleted
*/
private void reinstallAllFlowEntries() {
- log.info("Reinstalling all inactive flows");
+ log.trace("Reinstalling all inactive flows");
for (FlowEntry flowEntry : this.inactiveFlows.keySet()) {
this.addEntry(flowEntry, false);
dropAllConfig.setActions(dropAction);
defaultConfigs.add(dropAllConfig);
- log.info("Forwarding mode for node {} set to {}", node, (proactive ? "proactive" : "reactive"));
+ log.trace("Forwarding mode for node {} set to {}", node, (proactive ? "proactive" : "reactive"));
for (FlowConfig fc : defaultConfigs) {
Status status = (proactive) ? addStaticFlowInternal(fc, false) : removeStaticFlow(fc);
if (status.isSuccess()) {
- log.info("{} Proactive Static flow: {}", (proactive ? "Installed" : "Removed"), fc.getName());
+ log.trace("{} Proactive Static flow: {}", (proactive ? "Installed" : "Removed"), fc.getName());
} else {
log.warn("Failed to {} Proactive Static flow: {}", (proactive ? "install" : "remove"),
fc.getName());
* @param node
*/
private void cleanDatabaseForNode(Node node) {
- log.info("Cleaning Flow database for Node {}", node);
+ log.trace("Cleaning Flow database for Node {}", node);
if (nodeFlows.containsKey(node)) {
List<FlowEntryInstall> toRemove = new ArrayList<FlowEntryInstall>(nodeFlows.get(node));
@Override
public void portGroupChanged(PortGroupConfig config, Map<Node, PortGroup> data, boolean add) {
- log.info("PortGroup Changed for: {} Data: {}", config, portGroupData);
+ log.trace("PortGroup Changed for: {} Data: {}", config, portGroupData);
Map<Node, PortGroup> existingData = portGroupData.get(config);
if (existingData != null) {
for (Map.Entry<Node, PortGroup> entry : data.entrySet()) {
log.warn("Invalid policy name \"{}\", defaulting to {}", policy, handler);
}
}
- log.info("Setting uncaught exception policy to {}", handler);
+ log.trace("Setting uncaught exception policy to {}", handler);
Thread.setDefaultUncaughtExceptionHandler(handler);
/*
private Logger logger = LoggerFactory.getLogger(ClusteredDataStoreImpl.class);
public ClusteredDataStoreImpl(IClusterGlobalServices clusterGlobalServices) throws CacheConfigException {
- logger.info("Constructing clustered data store");
+ logger.trace("Constructing clustered data store");
Preconditions.checkNotNull(clusterGlobalServices, "clusterGlobalServices cannot be null");
operationalDataCache = getOrCreateCache(clusterGlobalServices, OPERATIONAL_DATA_CACHE);
public boolean containsConfigurationPath(InstanceIdentifier path) {
return configurationDataCache.containsKey(path);
}
-
+
@Override
public boolean containsOperationalPath(InstanceIdentifier path) {
return operationalDataCache.containsKey(path);
}
-
+
@Override
public Iterable<InstanceIdentifier> getStoredConfigurationPaths() {
return configurationDataCache.keySet();
}
-
+
@Override
public Iterable<InstanceIdentifier> getStoredOperationalPaths() {
return operationalDataCache.keySet();
}
-
-
-
+
+
+
@Override
public CompositeNode readConfigurationData(InstanceIdentifier path) {
Preconditions.checkNotNull(path, "path cannot be null");
val table= it.readConfigurationData(tableRef) as Table;
if(table != null){
- LOG.info("Number of flows installed in table 0 of node {} : {}",node,table.flow.size);
+ LOG.trace("Number of flows installed in table 0 of node {} : {}",node,table.flow.size);
for(flow : table.flow){
val table= it.readConfigurationData(tableRef) as Table;
if(table != null){
- LOG.info("Number of flows installed in table 0 of node {} : {}",node,table.flow.size);
+ LOG.trace("Number of flows installed in table 0 of node {} : {}",node,table.flow.size);
for(mdsalFlow : table.flow){
if(FromSalConversionsUtils.flowEquals(mdsalFlow, MDFlowMapping.toMDSalflow(targetFlow))){
if (typeDef !== null) {
return typeDef;
}
- LOG.info("Thread blocked waiting for schema for: {}",type.fullyQualifiedName)
+ LOG.trace("Thread blocked waiting for schema for: {}",type.fullyQualifiedName)
return type.getSchemaInFuture.get();
}
}
private def createDummyImplementation(Class<?> object, GeneratedTransferObject typeSpec) {
- log.info("Generating Dummy DOM Codec for {} with {}", object, object.classLoader)
+ log.trace("Generating Dummy DOM Codec for {} with {}", object, object.classLoader)
return createClass(typeSpec.codecClassName) [
if (object.isYangBindingAvailable) {
implementsType(BINDING_CODEC)
if(log.isDebugEnabled){\r
log.debug("Delivering notification {} to {}",notification,listener);\r
} else {\r
- log.info("Delivering notification {} to {}",notification.class.name,listener);\r
+ log.trace("Delivering notification {} to {}",notification.class.name,listener);\r
}\r
listener.onNotification(notification);\r
if(log.isDebugEnabled){\r
log.debug("Notification delivered {} to {}",notification,listener);\r
} else {\r
- log.info("Notification delivered {} to {}",notification.class.name,listener);\r
+ log.trace("Notification delivered {} to {}",notification.class.name,listener);\r
}\r
} catch (Exception e) {\r
log.error("Unhandled exception thrown by listener: {}", listener, e);\r
}
DataModificationTransaction domTransaction = createBindingToDomTransaction(bindingTransaction);
BindingToDomTransaction wrapped = new BindingToDomTransaction(domTransaction, bindingTransaction);
- LOG.info("Forwarding Binding Transaction: {} as DOM Transaction: {} .", bindingTransaction.getIdentifier(),
+ LOG.trace("Forwarding Binding Transaction: {} as DOM Transaction: {} .", bindingTransaction.getIdentifier(),
domTransaction.getIdentifier());
return wrapped;
}
org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction baTransaction = createDomToBindingTransaction(domTransaction);
DomToBindingTransaction forwardedTransaction = new DomToBindingTransaction(baTransaction, domTransaction);
- LOG.info("Forwarding DOM Transaction: {} as Binding Transaction: {}.", domTransaction.getIdentifier(),
+ LOG.trace("Forwarding DOM Transaction: {} as Binding Transaction: {}.", domTransaction.getIdentifier(),
baTransaction.getIdentifier());
return forwardedTransaction;
}
override final registerCommitHandler(P path, DataCommitHandler<P, D> commitHandler) {\r
val registration = new DataCommitHandlerRegistrationImpl(path, commitHandler, this);\r
commitHandlers.put(path, registration)\r
- LOG.info("Registering Commit Handler {} for path: {}",commitHandler,path);\r
+ LOG.trace("Registering Commit Handler {} for path: {}",commitHandler,path);\r
for(listener : commitHandlerRegistrationListeners) {\r
try {\r
listener.instance.onRegister(registration);\r
protected final def removeCommitHandler(DataCommitHandlerRegistrationImpl<P, D> registration) {\r
commitHandlers.remove(registration.path, registration);\r
\r
- LOG.info("Removing Commit Handler {} for path: {}",registration.instance,registration.path);\r
+ LOG.trace("Removing Commit Handler {} for path: {}",registration.instance,registration.path);\r
for(listener : commitHandlerRegistrationListeners) {\r
try {\r
listener.instance.onUnregister(registration);\r
\r
val transactionId = transaction.identifier;\r
\r
- log.info("Transaction: {} Started.",transactionId);\r
+ log.trace("Transaction: {} Started.",transactionId);\r
// requesting commits\r
val Iterable<DataCommitHandler<P, D>> commitHandlers = dataBroker.affectedCommitHandlers(affectedPaths);\r
val List<DataCommitTransaction<P, D>> handlerTransactions = new ArrayList();\r
dataBroker.failedTransactionsCount.andIncrement\r
return rollback(handlerTransactions, e);\r
}\r
- log.info("Transaction: {} Finished successfully.",transactionId);\r
+ log.trace("Transaction: {} Finished successfully.",transactionId);\r
dataBroker.finishedTransactionsCount.andIncrement;\r
return Rpcs.getRpcResult(true, TransactionStatus.COMMITED, Collections.emptySet());\r
\r
override registerConsumer(Consumer consumer, BundleContext ctx) {
checkPredicates(consumer);
- log.info("Registering consumer " + consumer);
+ log.trace("Registering consumer " + consumer);
val session = newSessionFor(consumer, ctx);
consumer.onSessionInitiated(session);
sessions.add(session);
private void sendNotification(CompositeNode notification) {
QName type = notification.getNodeType();
Collection<NotificationListener> toNotify = listeners.get(type);
- log.info("Publishing notification " + type);
+ log.trace("Publishing notification " + type);
if (toNotify == null) {
// No listeners were registered - returns.
.create();
private boolean closed = false;
-
+
public Registration<NotificationListener> addNotificationListener(QName notification,
NotificationListener listener) {
checkSessionState();
consumerListeners.put(notification, listener);
listeners.put(notification, listener);
- log.info("Registered listener for notification: " + notification);
+ log.trace("Registered listener for notification: " + notification);
return null; // Return registration Object.
}
inconsistentBundlesToYangURLs.clear();
// update cache
updateCache(snapshot);
- logger.info("SchemaService updated to new consistent state");
+ logger.trace("SchemaService updated to new consistent state");
logger.trace("SchemaService updated to new consistent state containing {}", consistentBundlesToYangURLs);
// notifyListeners(changedURLs, adding);
private void sendNotification(CompositeNode notification) {
QName type = notification.getNodeType();
Collection<Registration<NotificationListener>> toNotify = listeners.get(type);
- log.info("Publishing notification " + type);
+ log.trace("Publishing notification " + type);
if (toNotify == null) {
// No listeners were registered - returns.
}
if (schema == null) {
- LOG.info("Validation not performed for {}. Reason: YANG Schema not present.", modification.getIdentifier());
+ LOG.warn("Validation not performed for {}. Reason: YANG Schema not present.", modification.getIdentifier());
return;
}
}
request.addLeaf("version", revision.get());
}
- device.logger.info("Loading YANG schema source for {}:{}", moduleName, revision);
+ device.logger.trace("Loading YANG schema source for {}:{}", moduleName, revision);
RpcResult<CompositeNode> schemaReply = device.invokeRpc(GET_SCHEMA_QNAME, request.toInstance());
if (schemaReply.isSuccessful()) {
String schemaBody = getSchemaFromRpc(schemaReply.getResult());
if (schemaBody != null) {
- device.logger.info("YANG Schema successfully retrieved from remote for {}:{}", moduleName, revision);
+ device.logger.trace("YANG Schema successfully retrieved from remote for {}:{}", moduleName, revision);
return Optional.of(schemaBody);
}
}
- device.logger.info("YANG shcema was not successfully retrieved.");
+ device.logger.warn("YANG schema was not successfully retrieved.");
return Optional.absent();
}
}
return null;
}
-
+
public static final boolean isSupportedFor(Set<QName> capabilities) {
return capabilities.contains(IETF_NETCONF_MONITORING);
}
override readConfigurationData(InstanceIdentifier path) {
checkPreconditions
- LOG.info("Read Configuration via Restconf: {}", path)
+ LOG.trace("Read Configuration via Restconf: {}", path)
return dataService.readConfigurationData(path);
}
def readConfigurationDataBehindMountPoint(MountInstance mountPoint, InstanceIdentifier path) {
checkPreconditions
- LOG.info("Read Configuration via Restconf: {}", path)
+ LOG.trace("Read Configuration via Restconf: {}", path)
return mountPoint.readConfigurationData(path);
}
override readOperationalData(InstanceIdentifier path) {
checkPreconditions
- LOG.info("Read Operational via Restconf: {}", path)
+ LOG.trace("Read Operational via Restconf: {}", path)
return dataService.readOperationalData(path);
}
def readOperationalDataBehindMountPoint(MountInstance mountPoint, InstanceIdentifier path) {
checkPreconditions
- LOG.info("Read Operational via Restconf: {}", path)
+ LOG.trace("Read Operational via Restconf: {}", path)
return mountPoint.readOperationalData(path);
}
def commitConfigurationDataPut(InstanceIdentifier path, CompositeNode payload) {
checkPreconditions
val transaction = dataService.beginTransaction;
- LOG.info("Put Configuration via Restconf: {}", path)
+ LOG.trace("Put Configuration via Restconf: {}", path)
transaction.putConfigurationData(path, payload);
return transaction.commit
}
def commitConfigurationDataPutBehindMountPoint(MountInstance mountPoint, InstanceIdentifier path, CompositeNode payload) {
checkPreconditions
val transaction = mountPoint.beginTransaction;
- LOG.info("Put Configuration via Restconf: {}", path)
+ LOG.trace("Put Configuration via Restconf: {}", path)
transaction.putConfigurationData(path, payload);
return transaction.commit
}
val transaction = dataService.beginTransaction;
transaction.putConfigurationData(path, payload);
if (payload == transaction.createdConfigurationData.get(path)) {
- LOG.info("Post Configuration via Restconf: {}", path)
+ LOG.trace("Post Configuration via Restconf: {}", path)
return transaction.commit
}
- LOG.info("Post Configuration via Restconf was not executed because data already exists: {}", path)
+ LOG.trace("Post Configuration via Restconf was not executed because data already exists: {}", path)
return null;
}
val transaction = mountPoint.beginTransaction;
transaction.putConfigurationData(path, payload);
if (payload == transaction.createdConfigurationData.get(path)) {
- LOG.info("Post Configuration via Restconf: {}", path)
+ LOG.trace("Post Configuration via Restconf: {}", path)
return transaction.commit
}
- LOG.info("Post Configuration via Restconf was not executed because data already exists: {}", path)
+ LOG.trace("Post Configuration via Restconf was not executed because data already exists: {}", path)
return null;
}
RpcResult<Void> result = getToastService().makeToast(toastInput.build()).get();
if (result.isSuccessful()) {
- log.info("Toast was successfuly finished");
+ log.trace("Toast was successfully finished");
} else {
- log.info("Toast was not successfuly finished");
+ log.warn("Toast was not successfully finished");
}
return result.isSuccessful();
} catch (InterruptedException | ExecutionException e) {
- log.info("Error occured during toast creation");
+ log.warn("Error occurred during toast creation");
}
return false;
}
-
+
@Override
@Deprecated
protected void startImpl(BundleContext context) {
@Override
public void onNotification(ToastDone notification) {
- log.info("ToastDone Notification Received: {} ",notification.getToastStatus());
+ log.trace("ToastDone Notification Received: {} ",notification.getToastStatus());
}
@Override
public Future<RpcResult<Void>> makeToast(MakeToastInput input) {
// TODO Auto-generated method stub
- log.info("makeToast - Received input for toast");
+ log.trace("makeToast - Received input for toast");
logToastInput(input);
if (currentTask != null) {
return inProgressError();
private void logToastInput(MakeToastInput input) {
String toastType = input.getToasterToastType().getName();
String toastDoneness = input.getToasterDoneness().toString();
- log.info("Toast: {} doneness: {}", toastType, toastDoneness);
+ log.trace("Toast: {} doneness: {}", toastType, toastDoneness);
}
private class MakeToastTask implements Callable<RpcResult<Void>> {
ToastDoneBuilder notifyBuilder = new ToastDoneBuilder();
notifyBuilder.setToastStatus(ToastStatus.Done);
notificationProvider.notify(notifyBuilder.build());
- log.info("Toast Done");
+ log.trace("Toast Done");
logToastInput(toastRequest);
currentTask = null;
return Rpcs.<Void> getRpcResult(true, null, Collections.<RpcError> emptySet());
public class StatisticsProvider implements AutoCloseable {
public final static Logger spLogger = LoggerFactory.getLogger(StatisticsProvider.class);
-
+
private DataProviderService dps;
private NotificationProviderService nps;
-
+
private OpendaylightGroupStatisticsService groupStatsService;
-
+
private OpendaylightMeterStatisticsService meterStatsService;
-
+
private OpendaylightFlowStatisticsService flowStatsService;
-
+
private OpendaylightPortStatisticsService portStatsService;
private OpendaylightFlowTableStatisticsService flowTableStatsService;
private OpendaylightQueueStatisticsService queueStatsService;
private final MultipartMessageManager multipartMessageManager = new MultipartMessageManager();
-
+
private Thread statisticsRequesterThread;
-
+
private final InstanceIdentifier<Nodes> nodesIdentifier = InstanceIdentifier.builder(Nodes.class).toInstance();
-
+
private final int STATS_THREAD_EXECUTION_TIME= 50000;
//Local caching of stats
-
- private final ConcurrentMap<NodeId,NodeStatistics> statisticsCache =
+
+ private final ConcurrentMap<NodeId,NodeStatistics> statisticsCache =
new ConcurrentHashMap<NodeId,NodeStatistics>();
-
+
public DataProviderService getDataService() {
return this.dps;
}
-
+
public void setDataService(final DataProviderService dataService) {
this.dps = dataService;
}
-
+
public NotificationProviderService getNotificationService() {
return this.nps;
}
-
+
public void setNotificationService(final NotificationProviderService notificationService) {
this.nps = notificationService;
}
}
private final StatisticsUpdateCommiter updateCommiter = new StatisticsUpdateCommiter(StatisticsProvider.this);
-
+
private Registration<NotificationListener> listenerRegistration;
-
+
public void start() {
-
+
NotificationProviderService nps = this.getNotificationService();
Registration<NotificationListener> registerNotificationListener = nps.registerNotificationListener(this.updateCommiter);
this.listenerRegistration = registerNotificationListener;
-
+
// Get Group/Meter statistics service instance
groupStatsService = StatisticsManagerActivator.getProviderContext().
getRpcService(OpendaylightGroupStatisticsService.class);
-
+
meterStatsService = StatisticsManagerActivator.getProviderContext().
getRpcService(OpendaylightMeterStatisticsService.class);
-
+
flowStatsService = StatisticsManagerActivator.getProviderContext().
getRpcService(OpendaylightFlowStatisticsService.class);
flowTableStatsService = StatisticsManagerActivator.getProviderContext().
getRpcService(OpendaylightFlowTableStatisticsService.class);
-
+
queueStatsService = StatisticsManagerActivator.getProviderContext().
getRpcService(OpendaylightQueueStatisticsService.class);
-
+
statisticsRequesterThread = new Thread( new Runnable(){
@Override
while(true){
try {
statsRequestSender();
-
+
Thread.sleep(STATS_THREAD_EXECUTION_TIME);
}catch (Exception e){
spLogger.error("Exception occurred while sending stats request : {}",e);
}
}
});
-
+
spLogger.debug("Statistics requester thread started with timer interval : {}",STATS_THREAD_EXECUTION_TIME);
-
+
statisticsRequesterThread.start();
-
+
spLogger.info("Statistics Provider started.");
}
-
+
protected DataModificationTransaction startChange() {
-
+
DataProviderService dps = this.getDataService();
return dps.beginTransaction();
}
-
+
private void statsRequestSender(){
-
+
List<Node> targetNodes = getAllConnectedNodes();
-
+
if(targetNodes == null)
return;
-
+
for (Node targetNode : targetNodes){
-
+
if(targetNode.getAugmentation(FlowCapableNode.class) != null){
- spLogger.info("Send request for stats collection to node : {})",targetNode.getId());
-
+ spLogger.trace("Send request for stats collection to node : {}",targetNode.getId());
+
InstanceIdentifier<Node> targetInstanceId = InstanceIdentifier.builder(Nodes.class).child(Node.class,targetNode.getKey()).toInstance();
-
+
NodeRef targetNodeRef = new NodeRef(targetInstanceId);
-
+
try{
sendAggregateFlowsStatsFromAllTablesRequest(targetNode.getKey());
-
+
sendAllFlowsStatsFromAllTablesRequest(targetNodeRef);
sendAllNodeConnectorsStatisticsRequest(targetNodeRef);
-
+
sendAllFlowTablesStatisticsRequest(targetNodeRef);
-
+
sendAllQueueStatsFromAllNodeConnector (targetNodeRef);
sendAllGroupStatisticsRequest(targetNodeRef);
-
+
sendAllMeterStatisticsRequest(targetNodeRef);
-
+
sendGroupDescriptionRequest(targetNodeRef);
-
+
sendMeterConfigStatisticsRequest(targetNodeRef);
}catch(Exception e){
spLogger.error("Exception occured while sending statistics requests : {}", e);
}
private void sendAllFlowTablesStatisticsRequest(NodeRef targetNodeRef) throws InterruptedException, ExecutionException {
- final GetFlowTablesStatisticsInputBuilder input =
+ final GetFlowTablesStatisticsInputBuilder input =
new GetFlowTablesStatisticsInputBuilder();
-
+
input.setNode(targetNodeRef);
- Future<RpcResult<GetFlowTablesStatisticsOutput>> response =
+ Future<RpcResult<GetFlowTablesStatisticsOutput>> response =
flowTableStatsService.getFlowTablesStatistics(input.build());
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
private void sendAllFlowsStatsFromAllTablesRequest(NodeRef targetNode) throws InterruptedException, ExecutionException{
final GetAllFlowsStatisticsFromAllFlowTablesInputBuilder input =
new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder();
-
+
input.setNode(targetNode);
-
- Future<RpcResult<GetAllFlowsStatisticsFromAllFlowTablesOutput>> response =
+
+ Future<RpcResult<GetAllFlowsStatisticsFromAllFlowTablesOutput>> response =
flowStatsService.getAllFlowsStatisticsFromAllFlowTables(input.build());
-
+
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.ALL_FLOW);
-
+
}
-
+
private void sendAggregateFlowsStatsFromAllTablesRequest(NodeKey targetNodeKey) throws InterruptedException, ExecutionException{
-
+
List<Short> tablesId = getTablesFromNode(targetNodeKey);
-
+
if(tablesId.size() != 0){
for(Short id : tablesId){
-
- spLogger.info("Send aggregate stats request for flow table {} to node {}",id,targetNodeKey);
- GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder input =
+
+ spLogger.trace("Send aggregate stats request for flow table {} to node {}",id,targetNodeKey);
+ GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder input =
new GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder();
-
+
input.setNode(new NodeRef(InstanceIdentifier.builder(Nodes.class).child(Node.class, targetNodeKey).toInstance()));
input.setTableId(new org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId(id));
- Future<RpcResult<GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutput>> response =
+ Future<RpcResult<GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutput>> response =
flowStatsService.getAggregateFlowStatisticsFromFlowTableForAllFlows(input.build());
-
+
multipartMessageManager.setTxIdAndTableIdMapEntry(response.get().getResult().getTransactionId(), id);
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.AGGR_FLOW);
}
private void sendAllNodeConnectorsStatisticsRequest(NodeRef targetNode) throws InterruptedException, ExecutionException{
-
+
final GetAllNodeConnectorsStatisticsInputBuilder input = new GetAllNodeConnectorsStatisticsInputBuilder();
-
+
input.setNode(targetNode);
- Future<RpcResult<GetAllNodeConnectorsStatisticsOutput>> response =
+ Future<RpcResult<GetAllNodeConnectorsStatisticsOutput>> response =
portStatsService.getAllNodeConnectorsStatistics(input.build());
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.ALL_PORT);
}
private void sendAllGroupStatisticsRequest(NodeRef targetNode) throws InterruptedException, ExecutionException{
-
+
final GetAllGroupStatisticsInputBuilder input = new GetAllGroupStatisticsInputBuilder();
-
+
input.setNode(targetNode);
- Future<RpcResult<GetAllGroupStatisticsOutput>> response =
+ Future<RpcResult<GetAllGroupStatisticsOutput>> response =
groupStatsService.getAllGroupStatistics(input.build());
-
+
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.ALL_GROUP);
}
-
+
private void sendGroupDescriptionRequest(NodeRef targetNode) throws InterruptedException, ExecutionException{
final GetGroupDescriptionInputBuilder input = new GetGroupDescriptionInputBuilder();
-
+
input.setNode(targetNode);
- Future<RpcResult<GetGroupDescriptionOutput>> response =
+ Future<RpcResult<GetGroupDescriptionOutput>> response =
groupStatsService.getGroupDescription(input.build());
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.GROUP_DESC);
}
-
+
private void sendAllMeterStatisticsRequest(NodeRef targetNode) throws InterruptedException, ExecutionException{
-
+
GetAllMeterStatisticsInputBuilder input = new GetAllMeterStatisticsInputBuilder();
-
+
input.setNode(targetNode);
- Future<RpcResult<GetAllMeterStatisticsOutput>> response =
+ Future<RpcResult<GetAllMeterStatisticsOutput>> response =
meterStatsService.getAllMeterStatistics(input.build());
-
+
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.ALL_METER);;
}
-
+
private void sendMeterConfigStatisticsRequest(NodeRef targetNode) throws InterruptedException, ExecutionException{
-
+
GetAllMeterConfigStatisticsInputBuilder input = new GetAllMeterConfigStatisticsInputBuilder();
-
+
input.setNode(targetNode);
- Future<RpcResult<GetAllMeterConfigStatisticsOutput>> response =
+ Future<RpcResult<GetAllMeterConfigStatisticsOutput>> response =
meterStatsService.getAllMeterConfigStatistics(input.build());
-
+
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.METER_CONFIG);;
}
-
+
private void sendAllQueueStatsFromAllNodeConnector(NodeRef targetNode) throws InterruptedException, ExecutionException {
GetAllQueuesStatisticsFromAllPortsInputBuilder input = new GetAllQueuesStatisticsFromAllPortsInputBuilder();
-
+
input.setNode(targetNode);
-
- Future<RpcResult<GetAllQueuesStatisticsFromAllPortsOutput>> response =
+
+ Future<RpcResult<GetAllQueuesStatisticsFromAllPortsOutput>> response =
queueStatsService.getAllQueuesStatisticsFromAllPorts(input.build());
-
+
this.multipartMessageManager.addTxIdToRequestTypeEntry(response.get().getResult().getTransactionId()
, StatsRequestType.ALL_QUEUE_STATS);;
public ConcurrentMap<NodeId, NodeStatistics> getStatisticsCache() {
return statisticsCache;
}
-
+
private List<Node> getAllConnectedNodes(){
-
+
Nodes nodes = (Nodes) dps.readOperationalData(nodesIdentifier);
if(nodes == null)
return null;
-
- spLogger.info("Number of connected nodes : {}",nodes.getNode().size());
+
+ spLogger.trace("Number of connected nodes : {}",nodes.getNode().size());
return nodes.getNode();
}
-
+
private List<Short> getTablesFromNode(NodeKey nodeKey){
InstanceIdentifier<FlowCapableNode> nodesIdentifier = InstanceIdentifier.builder(Nodes.class).child(Node.class,nodeKey).augmentation(FlowCapableNode.class).toInstance();
-
+
FlowCapableNode node = (FlowCapableNode)dps.readOperationalData(nodesIdentifier);
List<Short> tablesId = new ArrayList<Short>();
if(node != null && node.getTable()!=null){
- spLogger.info("Number of tables {} supported by node {}",node.getTable().size(),nodeKey);
+ spLogger.trace("Number of tables {} supported by node {}",node.getTable().size(),nodeKey);
for(Table table: node.getTable()){
tablesId.add(table.getId());
}
@SuppressWarnings("deprecation")
@Override
public void close(){
-
+
try {
- spLogger.info("Statistics Provider stopped.");
+ spLogger.trace("Statistics Provider stopped.");
if (this.listenerRegistration != null) {
-
+
this.listenerRegistration.close();
-
+
this.statisticsRequesterThread.destroy();
-
+
}
} catch (Throwable e) {
throw Exceptions.sneakyThrow(e);
import org.slf4j.LoggerFactory;
/**
- * Class implement statistics manager related listener interface and augment all the
+ * Class implements statistics manager related listener interface and augments all the
* received statistics data to data stores.
- * TODO: Need to add error message listener and clean-up the associated tx id
+ * TODO: Need to add error message listener and clean up the associated tx id
* if it exists in the tx-id cache.
* @author vishnoianil
*
*/
public class StatisticsUpdateCommiter implements OpendaylightGroupStatisticsListener,
- OpendaylightMeterStatisticsListener,
+ OpendaylightMeterStatisticsListener,
OpendaylightFlowStatisticsListener,
OpendaylightPortStatisticsListener,
OpendaylightFlowTableStatisticsListener,
OpendaylightQueueStatisticsListener{
-
+
public final static Logger sucLogger = LoggerFactory.getLogger(StatisticsUpdateCommiter.class);
private final StatisticsProvider statisticsManager;
-
+
private final int unaccountedFlowsCounter = 1;
public StatisticsUpdateCommiter(final StatisticsProvider manager){
this.statisticsManager = manager;
}
-
+
public StatisticsProvider getStatisticsManager(){
return statisticsManager;
}
-
+
@Override
public void onMeterConfigStatsUpdated(MeterConfigStatsUpdated notification) {
//Check if response is for the request statistics-manager sent.
if(this.statisticsManager.getMultipartMessageManager().removeTxId(notification.getTransactionId()) == null)
return;
-
+
//Add statistics to local cache
ConcurrentMap<NodeId, NodeStatistics> cache = this.statisticsManager.getStatisticsCache();
if(!cache.containsKey(notification.getId())){
cache.put(notification.getId(), new NodeStatistics());
}
cache.get(notification.getId()).setMeterConfigStats(notification.getMeterConfigStats());
-
+
//Publish data to configuration data store
NodeKey key = new NodeKey(notification.getId());
-
+
List<MeterConfigStats> eterConfigStatsList = notification.getMeterConfigStats();
-
+
for(MeterConfigStats meterConfigStats : eterConfigStatsList){
DataModificationTransaction it = this.statisticsManager.startChange();
MeterBuilder meterBuilder = new MeterBuilder();
MeterKey meterKey = new MeterKey(meterConfigStats.getMeterId());
meterBuilder.setKey(meterKey);
-
+
InstanceIdentifier<Meter> meterRef = InstanceIdentifier.builder(Nodes.class).child(Node.class,key)
.augmentation(FlowCapableNode.class)
.child(Meter.class,meterKey).toInstance();
-
+
NodeMeterConfigStatsBuilder meterConfig= new NodeMeterConfigStatsBuilder();
MeterConfigStatsBuilder stats = new MeterConfigStatsBuilder();
stats.fieldsFrom(meterConfigStats);
meterConfig.setMeterConfigStats(stats.build());
-
+
//Update augmented data
meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
it.putOperationalData(meterRef, meterBuilder.build());
@Override
public void onMeterStatisticsUpdated(MeterStatisticsUpdated notification) {
-
+
//Check if response is for the request statistics-manager sent.
if(this.statisticsManager.getMultipartMessageManager().removeTxId(notification.getTransactionId()) == null)
return;
cache.put(notification.getId(), new NodeStatistics());
}
cache.get(notification.getId()).setMeterStatistics(notification.getMeterStats());
-
+
NodeKey key = new NodeKey(notification.getId());
-
+
List<MeterStats> meterStatsList = notification.getMeterStats();
-
+
for(MeterStats meterStats : meterStatsList){
//Publish data to configuration data store
MeterBuilder meterBuilder = new MeterBuilder();
MeterKey meterKey = new MeterKey(meterStats.getMeterId());
meterBuilder.setKey(meterKey);
-
+
InstanceIdentifier<Meter> meterRef = InstanceIdentifier.builder(Nodes.class).child(Node.class,key)
.augmentation(FlowCapableNode.class)
.child(Meter.class,meterKey).toInstance();
-
+
NodeMeterStatisticsBuilder meterStatsBuilder= new NodeMeterStatisticsBuilder();
MeterStatisticsBuilder stats = new MeterStatisticsBuilder();
stats.fieldsFrom(meterStats);
@Override
public void onGroupDescStatsUpdated(GroupDescStatsUpdated notification) {
-
+
//Check if response is for the request statistics-manager sent.
if(this.statisticsManager.getMultipartMessageManager().removeTxId(notification.getTransactionId()) == null)
return;
cache.put(notification.getId(), new NodeStatistics());
}
cache.get(notification.getId()).setGroupDescStats(notification.getGroupDescStats());
-
+
//Publish data to configuration data store
NodeKey key = new NodeKey(notification.getId());
List<GroupDescStats> groupDescStatsList = notification.getGroupDescStats();
for(GroupDescStats groupDescStats : groupDescStatsList){
DataModificationTransaction it = this.statisticsManager.startChange();
-
+
GroupBuilder groupBuilder = new GroupBuilder();
GroupKey groupKey = new GroupKey(groupDescStats.getGroupId());
groupBuilder.setKey(groupKey);
-
+
InstanceIdentifier<Group> groupRef = InstanceIdentifier.builder(Nodes.class).child(Node.class,key)
.augmentation(FlowCapableNode.class)
.child(Group.class,groupKey).toInstance();
GroupDescBuilder stats = new GroupDescBuilder();
stats.fieldsFrom(groupDescStats);
groupDesc.setGroupDesc(stats.build());
-
+
//Update augmented data
groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
@Override
public void onGroupStatisticsUpdated(GroupStatisticsUpdated notification) {
-
+
//Check if response is for the request statistics-manager sent.
if(this.statisticsManager.getMultipartMessageManager().removeTxId(notification.getTransactionId()) == null)
return;
cache.put(notification.getId(), new NodeStatistics());
}
cache.get(notification.getId()).setGroupStatistics(notification.getGroupStats());
-
+
//Publish data to configuration data store
NodeKey key = new NodeKey(notification.getId());
List<GroupStats> groupStatsList = notification.getGroupStats();
for(GroupStats groupStats : groupStatsList){
DataModificationTransaction it = this.statisticsManager.startChange();
-
+
GroupBuilder groupBuilder = new GroupBuilder();
GroupKey groupKey = new GroupKey(groupStats.getGroupId());
groupBuilder.setKey(groupKey);
-
+
InstanceIdentifier<Group> groupRef = InstanceIdentifier.builder(Nodes.class).child(Node.class,key)
.augmentation(FlowCapableNode.class)
.child(Group.class,groupKey).toInstance();
GroupStatisticsBuilder stats = new GroupStatisticsBuilder();
stats.fieldsFrom(groupStats);
groupStatisticsBuilder.setGroupStatistics(stats.build());
-
+
//Update augmented data
groupBuilder.addAugmentation(NodeGroupStatistics.class, groupStatisticsBuilder.build());
it.putOperationalData(groupRef, groupBuilder.build());
it.commit();
}
}
-
+
@Override
public void onMeterFeaturesUpdated(MeterFeaturesUpdated notification) {
meterFeature.setMaxBands(notification.getMaxBands());
meterFeature.setMaxColor(notification.getMaxColor());
meterFeature.setMaxMeter(notification.getMaxMeter());
-
+
cache.get(notification.getId()).setMeterFeatures(meterFeature.build());
-
+
//Publish data to configuration data store
DataModificationTransaction it = this.statisticsManager.startChange();
NodeKey key = new NodeKey(notification.getId());
NodeRef ref = getNodeRef(key);
-
- final NodeBuilder nodeData = new NodeBuilder();
+
+ final NodeBuilder nodeData = new NodeBuilder();
nodeData.setKey(key);
-
+
NodeMeterFeaturesBuilder nodeMeterFeatures= new NodeMeterFeaturesBuilder();
nodeMeterFeatures.setMeterFeatures(meterFeature.build());
-
+
//Update augmented data
nodeData.addAugmentation(NodeMeterFeatures.class, nodeMeterFeatures.build());
-
+
InstanceIdentifier<? extends Object> refValue = ref.getValue();
it.putOperationalData(refValue, nodeData.build());
it.commit();
}
-
+
@Override
public void onGroupFeaturesUpdated(GroupFeaturesUpdated notification) {
if(!cache.containsKey(notification.getId())){
cache.put(notification.getId(), new NodeStatistics());
}
-
+
GroupFeaturesBuilder groupFeatures = new GroupFeaturesBuilder();
groupFeatures.setActions(notification.getActions());
groupFeatures.setGroupCapabilitiesSupported(notification.getGroupCapabilitiesSupported());
groupFeatures.setGroupTypesSupported(notification.getGroupTypesSupported());
groupFeatures.setMaxGroups(notification.getMaxGroups());
cache.get(notification.getId()).setGroupFeatures(groupFeatures.build());
-
+
//Publish data to configuration data store
DataModificationTransaction it = this.statisticsManager.startChange();
NodeKey key = new NodeKey(notification.getId());
NodeRef ref = getNodeRef(key);
-
- final NodeBuilder nodeData = new NodeBuilder();
+
+ final NodeBuilder nodeData = new NodeBuilder();
nodeData.setKey(key);
-
+
NodeGroupFeaturesBuilder nodeGroupFeatures= new NodeGroupFeaturesBuilder();
nodeGroupFeatures.setGroupFeatures(groupFeatures.build());
-
+
//Update augmented data
nodeData.addAugmentation(NodeGroupFeatures.class, nodeGroupFeatures.build());
-
+
InstanceIdentifier<? extends Object> refValue = ref.getValue();
it.putOperationalData(refValue, nodeData.build());
it.commit();
@Override
public void onFlowsStatisticsUpdate(FlowsStatisticsUpdate notification) {
-
+
//Check if response is for the request statistics-manager sent.
if(this.statisticsManager.getMultipartMessageManager().removeTxId(notification.getTransactionId()) == null)
return;
NodeKey key = new NodeKey(notification.getId());
sucLogger.debug("Received flow stats update : {}",notification.toString());
-
+
for(FlowAndStatisticsMapList map: notification.getFlowAndStatisticsMapList()){
short tableId = map.getTableId();
-
+
DataModificationTransaction it = this.statisticsManager.startChange();
boolean foundOriginalFlow = false;
flow.setPriority(map.getPriority());
flow.setStrict(map.isStrict());
flow.setTableId(tableId);
-
+
Flow flowRule = flow.build();
-
+
FlowAndStatisticsMapListBuilder stats = new FlowAndStatisticsMapListBuilder();
stats.setByteCount(map.getByteCount());
stats.setPacketCount(map.getPacketCount());
stats.setDuration(map.getDuration());
-
+
GenericStatistics flowStats = stats.build();
-
+
//Add statistics to local cache
ConcurrentMap<NodeId, NodeStatistics> cache = this.statisticsManager.getStatisticsCache();
if(!cache.containsKey(notification.getId())){
cache.get(notification.getId()).getFlowAndStatsMap().put(tableId, new HashMap<Flow,GenericStatistics>());
}
cache.get(notification.getId()).getFlowAndStatsMap().get(tableId).put(flowRule,flowStats);
-
+
//Augment the data to the flow node
FlowStatisticsBuilder flowStatistics = new FlowStatisticsBuilder();
flowStatistics.setTableId(tableId);
flowStatisticsData.setFlowStatistics(flowStatistics.build());
-
+
sucLogger.debug("Flow : {}",flowRule.toString());
sucLogger.debug("Statistics to augment : {}",flowStatistics.build().toString());
InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class).child(Node.class, key)
.augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tableId)).toInstance();
-
+
Table table= (Table)it.readConfigurationData(tableRef);
//TODO: Not a good way to do it, need to figure out better way.
- //TODO: major issue in any alternate approach is that flow key is incrementally assigned
+ //TODO: major issue in any alternate approach is that flow key is incrementally assigned
//to the flows stored in data store.
if(table != null){
.child(Flow.class,existingFlow.getKey()).toInstance();
flowBuilder.setKey(existingFlow.getKey());
flowBuilder.addAugmentation(FlowStatisticsData.class, flowStatisticsData.build());
- sucLogger.info("Found matching flow in the datastore, augmenting statistics");
+ sucLogger.trace("Found matching flow in the datastore, augmenting statistics");
foundOriginalFlow = true;
it.putOperationalData(flowRef, flowBuilder.build());
it.commit();
}
}
}
-
+
if(!foundOriginalFlow){
sucLogger.debug("Associated original flow is not found in data store. Augmenting flow in operational data store");
//TODO: Temporary fix: format [ 1+tableid+1+unaccounted flow counter]
.child(Flow.class,newFlowKey).toInstance();
flowBuilder.setKey(newFlowKey);
flowBuilder.addAugmentation(FlowStatisticsData.class, flowStatisticsData.build());
- sucLogger.info("Flow was no present in data store, augmenting statistics as an unaccounted flow");
+                        sucLogger.trace("Flow was not present in data store, augmenting statistics as an unaccounted flow");
it.putOperationalData(flowRef, flowBuilder.build());
it.commit();
}
NodeKey key = new NodeKey(notification.getId());
sucLogger.debug("Received aggregate flow statistics update : {}",notification.toString());
-
+
Short tableId = this.statisticsManager.getMultipartMessageManager().getTableIdForTxId(notification.getTransactionId());
if(tableId != null){
-
+
DataModificationTransaction it = this.statisticsManager.startChange();
InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class).child(Node.class, key)
aggregateFlowStatisticsBuilder.setFlowCount(notification.getFlowCount());
aggregateFlowStatisticsBuilder.setPacketCount(notification.getPacketCount());
aggregateFlowStatisticsDataBuilder.setAggregateFlowStatistics(aggregateFlowStatisticsBuilder.build());
-
+
ConcurrentMap<NodeId, NodeStatistics> cache = this.statisticsManager.getStatisticsCache();
if(!cache.containsKey(notification.getId())){
cache.put(notification.getId(), new NodeStatistics());
}
cache.get(notification.getId()).getTableAndAggregateFlowStatsMap().put(tableId,aggregateFlowStatisticsBuilder.build());
-
+
sucLogger.debug("Augment aggregate statistics: {} for table {} on Node {}",aggregateFlowStatisticsBuilder.build().toString(),tableId,key);
TableBuilder tableBuilder = new TableBuilder();
NodeKey key = new NodeKey(notification.getId());
sucLogger.debug("Received port stats update : {}",notification.toString());
-
+
//Add statistics to local cache
ConcurrentMap<NodeId, NodeStatistics> cache = this.statisticsManager.getStatisticsCache();
if(!cache.containsKey(notification.getId())){
List<NodeConnectorStatisticsAndPortNumberMap> portsStats = notification.getNodeConnectorStatisticsAndPortNumberMap();
for(NodeConnectorStatisticsAndPortNumberMap portStats : portsStats){
-
+
DataModificationTransaction it = this.statisticsManager.startChange();
- FlowCapableNodeConnectorStatisticsBuilder statisticsBuilder
+ FlowCapableNodeConnectorStatisticsBuilder statisticsBuilder
= new FlowCapableNodeConnectorStatisticsBuilder();
statisticsBuilder.setBytes(portStats.getBytes());
statisticsBuilder.setCollisionCount(portStats.getCollisionCount());
statisticsBuilder.setReceiveOverRunError(portStats.getReceiveOverRunError());
statisticsBuilder.setTransmitDrops(portStats.getTransmitDrops());
statisticsBuilder.setTransmitErrors(portStats.getTransmitErrors());
-
+
//Update data in the cache
cache.get(notification.getId()).getNodeConnectorStats().put(portStats.getNodeConnectorId(), statisticsBuilder.build());
-
+
//Augment data to the node-connector
- FlowCapableNodeConnectorStatisticsDataBuilder statisticsDataBuilder =
+ FlowCapableNodeConnectorStatisticsDataBuilder statisticsDataBuilder =
new FlowCapableNodeConnectorStatisticsDataBuilder();
-
+
statisticsDataBuilder.setFlowCapableNodeConnectorStatistics(statisticsBuilder.build());
-
+
InstanceIdentifier<NodeConnector> nodeConnectorRef = InstanceIdentifier.builder(Nodes.class).child(Node.class, key).child(NodeConnector.class, new NodeConnectorKey(portStats.getNodeConnectorId())).toInstance();
-
+
NodeConnector nodeConnector = (NodeConnector)it.readOperationalData(nodeConnectorRef);
-
+
if(nodeConnector != null){
sucLogger.debug("Augmenting port statistics {} to port {}",statisticsDataBuilder.build().toString(),nodeConnectorRef.toString());
NodeConnectorBuilder nodeConnectorBuilder = new NodeConnectorBuilder();
NodeKey key = new NodeKey(notification.getId());
sucLogger.debug("Received flow table statistics update : {}",notification.toString());
-
+
List<FlowTableAndStatisticsMap> flowTablesStatsList = notification.getFlowTableAndStatisticsMap();
for (FlowTableAndStatisticsMap ftStats : flowTablesStatsList){
-
+
DataModificationTransaction it = this.statisticsManager.startChange();
InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class).child(Node.class, key)
.augmentation(FlowCapableNode.class).child(Table.class, new TableKey(ftStats.getTableId().getValue())).toInstance();
-
+
FlowTableStatisticsDataBuilder statisticsDataBuilder = new FlowTableStatisticsDataBuilder();
-
+
FlowTableStatisticsBuilder statisticsBuilder = new FlowTableStatisticsBuilder();
statisticsBuilder.setActiveFlows(ftStats.getActiveFlows());
statisticsBuilder.setPacketsLookedUp(ftStats.getPacketsLookedUp());
statisticsBuilder.setPacketsMatched(ftStats.getPacketsMatched());
-
+
statisticsDataBuilder.setFlowTableStatistics(statisticsBuilder.build());
-
+
ConcurrentMap<NodeId, NodeStatistics> cache = this.statisticsManager.getStatisticsCache();
if(!cache.containsKey(notification.getId())){
cache.put(notification.getId(), new NodeStatistics());
}
cache.get(notification.getId()).getFlowTableAndStatisticsMap().put(ftStats.getTableId().getValue(),statisticsBuilder.build());
-
+
sucLogger.debug("Augment flow table statistics: {} for table {} on Node {}",statisticsBuilder.build().toString(),ftStats.getTableId(),key);
-
+
TableBuilder tableBuilder = new TableBuilder();
tableBuilder.setKey(new TableKey(ftStats.getTableId().getValue()));
tableBuilder.addAugmentation(FlowTableStatisticsData.class, statisticsDataBuilder.build());
@Override
public void onQueueStatisticsUpdate(QueueStatisticsUpdate notification) {
-
+
//Check if response is for the request statistics-manager sent.
if(this.statisticsManager.getMultipartMessageManager().removeTxId(notification.getTransactionId()) == null)
return;
NodeKey key = new NodeKey(notification.getId());
sucLogger.debug("Received queue stats update : {}",notification.toString());
-
+
//Add statistics to local cache
ConcurrentMap<NodeId, NodeStatistics> cache = this.statisticsManager.getStatisticsCache();
if(!cache.containsKey(notification.getId())){
cache.put(notification.getId(), new NodeStatistics());
}
-
+
List<QueueIdAndStatisticsMap> queuesStats = notification.getQueueIdAndStatisticsMap();
for(QueueIdAndStatisticsMap swQueueStats : queuesStats){
-
+
if(!cache.get(notification.getId()).getNodeConnectorAndQueuesStatsMap().containsKey(swQueueStats.getNodeConnectorId())){
cache.get(notification.getId()).getNodeConnectorAndQueuesStatsMap().put(swQueueStats.getNodeConnectorId(), new HashMap<QueueId,GenericQueueStatistics>());
}
-
+
FlowCapableNodeConnectorQueueStatisticsDataBuilder queueStatisticsDataBuilder = new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
-
+
FlowCapableNodeConnectorQueueStatisticsBuilder queueStatisticsBuilder = new FlowCapableNodeConnectorQueueStatisticsBuilder();
-
+
queueStatisticsBuilder.fieldsFrom(swQueueStats);
-
+
queueStatisticsDataBuilder.setFlowCapableNodeConnectorQueueStatistics(queueStatisticsBuilder.build());
-
+
cache.get(notification.getId()).getNodeConnectorAndQueuesStatsMap()
.get(swQueueStats.getNodeConnectorId())
.put(swQueueStats.getQueueId(), queueStatisticsBuilder.build());
-
-
+
+
DataModificationTransaction it = this.statisticsManager.startChange();
- InstanceIdentifier<Queue> queueRef
+ InstanceIdentifier<Queue> queueRef
= InstanceIdentifier.builder(Nodes.class)
.child(Node.class, key)
.child(NodeConnector.class, new NodeConnectorKey(swQueueStats.getNodeConnectorId()))
.augmentation(FlowCapableNodeConnector.class)
.child(Queue.class, new QueueKey(swQueueStats.getQueueId())).toInstance();
-
+
QueueBuilder queueBuilder = new QueueBuilder();
queueBuilder.addAugmentation(FlowCapableNodeConnectorQueueStatisticsData.class, queueStatisticsDataBuilder.build());
queueBuilder.setKey(new QueueKey(swQueueStats.getQueueId()));
- sucLogger.info("Augmenting queue statistics {} of queue {} to port {}"
+ sucLogger.trace("Augmenting queue statistics {} of queue {} to port {}"
,queueStatisticsDataBuilder.build().toString(),
swQueueStats.getQueueId(),
swQueueStats.getNodeConnectorId());
-
+
it.putOperationalData(queueRef, queueBuilder.build());
it.commit();
-
+
}
-
+
}
private NodeRef getNodeRef(NodeKey nodeKey){
InstanceIdentifierBuilder<?> builder = InstanceIdentifier.builder(Nodes.class).child(Node.class, nodeKey);
return new NodeRef(builder.toInstance());
}
-
+
public boolean flowEquals(Flow statsFlow, Flow storedFlow) {
if (statsFlow.getClass() != storedFlow.getClass()) {
return false;
}
return true;
}
-
+
/**
* Explicit equals method to compare the 'match' for flows stored in the data-stores and flow fetched from the switch.
- * Usecase: e.g If user don't set any ethernet source and destination address for match,data store will store null for
+ * Use case: e.g. if the user doesn't set any ethernet source and destination address for the match, the data store will store null for
* these address.
* e.g [_ethernetMatch=EthernetMatch [_ethernetDestination=null, _ethernetSource=null, _ethernetType=
* EthernetType [_type=EtherType [_value=2048], _mask=null, augmentation=[]]
- *
- * But when you fetch the flows from switch, openflow driver library converts all zero bytes of mac address in the
- * message stream to 00:00:00:00:00:00. Following string shows how library interpret the zero mac address bytes and
- * eventually when translator convert it to MD-SAL match, this is how it looks
- * [_ethernetDestination=EthernetDestination [_address=MacAddress [_value=00:00:00:00:00:00], _mask=null, augmentation=[]],
- * _ethernetSource=EthernetSource [_address=MacAddress [_value=00:00:00:00:00:00], _mask=null, augmentation=[]],
+ *
+ * But when you fetch the flows from switch, openflow driver library converts all zero bytes of mac address in the
+ * message stream to 00:00:00:00:00:00. The following string shows how the library interprets the zero mac address bytes and
+ * eventually when the translator converts it to an MD-SAL match, this is how it looks
+ * [_ethernetDestination=EthernetDestination [_address=MacAddress [_value=00:00:00:00:00:00], _mask=null, augmentation=[]],
+ * _ethernetSource=EthernetSource [_address=MacAddress [_value=00:00:00:00:00:00], _mask=null, augmentation=[]],
* _ethernetType=EthernetType [_type=EtherType [_value=2048], _mask=null, augmentation=[]]
- *
- * Similarly for inPort, if user/application don't set any value for it, FRM will store null value for it in data store.
+ *
+ * Similarly for inPort, if the user/application doesn't set any value for it, FRM will store null value for it in data store.
* When we fetch the same flow (with its statistics) from switch, plugin converts its value to openflow:X:0.
- * e.g _inPort=Uri [_value=openflow:1:0]
- *
+ * e.g _inPort=Uri [_value=openflow:1:0]
+ *
* So this custom equals method add additional check to take care of these scenario, in case any match element is null in data-store-flow, but not
* in the flow fetched from switch.
- *
+ *
* @param statsFlow
* @param storedFlow
* @return
"Unable to retrieve config snapshot after commit for persister, details: " + e.getMessage(),
ErrorType.application, ErrorTag.operation_failed, ErrorSeverity.error, e.getErrorInfo());
}
- logger.info("Datastore {} committed successfully: {}", Datastore.candidate, status);
+ logger.trace("Datastore {} committed successfully: {}", Datastore.candidate, status);
return document.createElement(XmlNetconfConstants.OK);
}
throw new NetconfDocumentedException(e.getMessage(), e, ErrorType.application, ErrorTag.operation_failed,
ErrorSeverity.error, errorInfo);
}
- logger.info("Changes discarded successfully from datastore {}", Datastore.candidate);
+ logger.trace("Changes discarded successfully from datastore {}", Datastore.candidate);
return document.createElement(XmlNetconfConstants.OK);
}
}
- logger.info("Datastore {} validated successfully", Datastore.candidate);
+ logger.trace("Datastore {} validated successfully", Datastore.candidate);
return document.createElement(XmlNetconfConstants.OK);
}
executeSet(configRegistryClient, editConfigExecution);
}
- logger.info("Operation {} successful", EditConfigXmlParser.EDIT_CONFIG);
+ logger.trace("Operation {} successful", EditConfigXmlParser.EDIT_CONFIG);
return document.createElement(XmlNetconfConstants.OK);
}
ObjectName on = null;
try {
on = ta.createModule(module, instance);
- logger.info("New instance for {} {} created under name {}", module, instance, on);
+ logger.trace("New instance for {} {} created under name {}", module, instance, on);
} catch (InstanceAlreadyExistsException e1) {
throw new IllegalStateException("Unable to create instance for " + module + " : " + instance);
}
ConfigTransactionClient ta = configRegistryClient.getConfigTransactionClient(txOn);
final Element element = runtime.toXml(runtimeBeans, configBeans, document, new ServiceRegistryWrapper(ta));
- logger.info("{} operation successful", XmlNetconfConstants.GET);
+ logger.trace("{} operation successful", XmlNetconfConstants.GET);
return element;
}
ServiceRegistryWrapper serviceTracker = new ServiceRegistryWrapper(ta);
dataElement = configMapping.toXml(instances, this.maybeNamespace, document, dataElement, serviceTracker);
- logger.info("{} operation successful", GET_CONFIG);
+ logger.trace("{} operation successful", GET_CONFIG);
return dataElement;
}
final Object result = executeOperation(configRegistryClient, execution.on, execution.operationName,
execution.attributes);
- logger.info("Operation {} called successfully on {} with arguments {} with result {}", execution.operationName,
+ logger.trace("Operation {} called successfully on {} with arguments {} with result {}", execution.operationName,
execution.on, execution.attributes, result);
if (execution.isVoid()) {
// Socket should not be closed at this point
// Activator unregisters this as JMX listener before close is called
- logger.info("Received notification {}", notification);
+ logger.trace("Received notification {}", notification);
if (notification instanceof CommitJMXNotification) {
try {
handleAfterCommitNotification((CommitJMXNotification) notification);
try {
persisterAggregator.persistConfig(new CapabilityStrippingConfigSnapshotHolder(notification.getConfigSnapshot(),
notification.getCapabilities(), ignoredMissingCapabilityRegex));
- logger.info("Configuration persisted successfully");
+ logger.trace("Configuration persisted successfully");
} catch (IOException e) {
throw new RuntimeException("Unable to persist configuration snapshot", e);
}
latestCapabilities = netconfClient.getCapabilities();
if (Util.isSubset(netconfClient, expectedCaps)) {
logger.debug("Hello from netconf stable with {} capabilities", latestCapabilities);
- logger.info("Session id received from netconf server: {}", netconfClient.getClientSession());
+ logger.trace("Session id received from netconf server: {}", netconfClient.getClientSession());
return netconfClient;
}
logger.debug("Polling hello from netconf, attempt {}, capabilities {}", attempt, latestCapabilities);
throws ConflictingVersionException, IOException, SAXException {
Element xmlToBePersisted = XmlUtil.readXmlToElement(configSnapshotHolder.getConfigSnapshot());
- logger.info("Pushing last configuration to netconf: {}", configSnapshotHolder);
+ logger.trace("Pushing last configuration to netconf: {}", configSnapshotHolder);
StringBuilder response = new StringBuilder("editConfig response = {");
NetconfMessage message = createEditConfigMessage(xmlToBePersisted, "/netconfOp/editConfig.xml");
response.append("commit response = {");
response.append(XmlUtil.toString(responseMessage.getDocument()));
response.append("}");
- logger.info("Last configuration loaded successfully");
+ logger.trace("Last configuration loaded successfully");
logger.trace("Detailed message {}", response);
}
XmlUtil.addNamespaceAttr(getSchemaResult,
XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_YANG_IETF_NETCONF_MONITORING);
- logger.info("{} operation successful", GET_SCHEMA);
+ logger.trace("{} operation successful", GET_SCHEMA);
return getSchemaResult;
}
.createElement(XmlNetconfConstants.RPC_ERROR);\r
}\r
\r
- logger.info("{} operation successful", START_EXI);\r
+ logger.trace("{} operation successful", START_EXI);\r
logger.debug("received start-exi message {} ", XmlUtil.toString(document));\r
return getSchemaResult;\r
\r
Element getSchemaResult = document.createElement(XmlNetconfConstants.OK);\r
XmlUtil.addNamespaceAttr(getSchemaResult,\r
XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);\r
- logger.info("{} operation successful", STOP_EXI);\r
+ logger.trace("{} operation successful", STOP_EXI);\r
logger.debug("received stop-exi message {} ", XmlUtil.toString(document));\r
return getSchemaResult;\r
}\r
ServiceTrackerCustomizer<IUserManager, IUserManager> customizer = new ServiceTrackerCustomizer<IUserManager, IUserManager>(){
@Override
public IUserManager addingService(ServiceReference<IUserManager> reference) {
- logger.info("Service {} added, let there be SSH bridge.", reference);
+ logger.trace("Service {} added, let there be SSH bridge.", reference);
iUserManager = context.getService(reference);
try {
onUserManagerFound(iUserManager);
}
@Override
public void modifiedService(ServiceReference<IUserManager> reference, IUserManager service) {
- logger.info("Replacing modified service {} in netconf SSH.", reference);
+ logger.trace("Replacing modified service {} in netconf SSH.", reference);
server.addUserManagerService(service);
}
@Override
public void removedService(ServiceReference<IUserManager> reference, IUserManager service) {
- logger.info("Removing service {} from netconf SSH. " +
+ logger.trace("Removing service {} from netconf SSH. " +
"SSH won't authenticate users until IUserManeger service will be started.", reference);
removeUserManagerService();
}
return AuthenticationResult.SUCCESS;
}
} catch (Exception e){
- logger.info("Authentication failed due to :" + e.getLocalizedMessage());
+ logger.warn("Authentication failed due to: {}", e.getLocalizedMessage(), e);
}
return AuthenticationResult.FAILURE;
}
public static void sendErrorMessage(final NetconfSession session,
final NetconfDocumentedException sendErrorException) {
- logger.info("Sending error {}", sendErrorException.getMessage(), sendErrorException);
+ logger.trace("Sending error {}", sendErrorException.getMessage(), sendErrorException);
final Document errorDocument = createDocument(sendErrorException);
session.sendMessage(new NetconfMessage(errorDocument));
}
public static void sendErrorMessage(Channel channel, NetconfDocumentedException sendErrorException) {
- logger.info("Sending error {}", sendErrorException.getMessage(), sendErrorException);
+ logger.trace("Sending error {}", sendErrorException.getMessage(), sendErrorException);
final Document errorDocument = createDocument(sendErrorException);
channel.writeAndFlush(new NetconfMessage(errorDocument));
}
public static void sendErrorMessage(NetconfSession session, NetconfDocumentedException sendErrorException,
NetconfMessage incommingMessage) {
final Document errorDocument = createDocument(sendErrorException);
- logger.info("Sending error {}", XmlUtil.toString(errorDocument));
+ logger.trace("Sending error {}", XmlUtil.toString(errorDocument));
tryToCopyAttributes(incommingMessage.getDocument(), errorDocument, sendErrorException);
session.sendMessage(new NetconfMessage(errorDocument));
}
}
}
if (conflictsMsg.length() > 0) {
- LOGGER.info("JAXB type conflicts detected : {}", conflictsMsg.toString());
+ LOGGER.warn("JAXB type conflicts detected : {}", conflictsMsg.toString());
}
}
} else {
auditMsg = "Mode: " + mode + " User " + user + " " + action + " " + moduleName + " " + resource;
}
- logger.info(auditMsg);
+ logger.trace(auditMsg);
}
public static void auditlog(String moduleName, String user, String action, String resource) {
} else {
if (!currentlyUp) {
// only generate log if the interface changes from down to up
- logger.info("Interface {} with address {} is UP!",
+ logger.trace("Interface {} with address {} is UP!",
netInt.getDisplayName(),
controllerIP.getHostAddress());
}
List<LLDPTLV> optionalTLVList = lldp.getOptionalTLVList();
if (optionalTLVList == null) {
- logger.info("The discovery packet with null custom option from {}", dstNodeConnector);
+ logger.warn("The discovery packet with null custom option from {}", dstNodeConnector);
return false;
}
continue;
}
if (action.getType() == ActionType.SET_NEXT_HOP) {
- logger.info("Unsupported action: {}", action);
+ logger.warn("Unsupported action: {}", action);
continue;
}
}
}
private void printInfoMessage(String type, StatsRequest request) {
- log.info("{} stats request not inserted for switch: {}. Queue size: {}. Collector state: {}.",
+ log.trace("{} stats request not inserted for switch: {}. Queue size: {}. Collector state: {}.",
new Object[] {type, HexString.toHexString(request.switchId), pendingStatsRequests.size(),
statisticsCollector.getState().toString() });
}
}
public VIP getVIPWithPoolName(VIP vip){
- cmLogger.info("Search a VIP with name:{}",vip);
+ cmLogger.trace("Search a VIP with name:{}",vip);
for(VIP vipTemp: this.vips.values()){
if(vipTemp.equals(vip)){
- cmLogger.info("Found VIP with pool detail : {}",vipTemp);
+ cmLogger.trace("Found VIP with pool detail : {}",vipTemp);
return vipTemp;
}
}
- cmLogger.info("VIP with pool detail not found ");
+ cmLogger.trace("VIP with pool detail not found ");
return null;
}
@Override
public VIP createVIP(String name,String ip,String protocol,short protocolPort,String poolName){
- cmLogger.info("Create VIP with the following details :[ name : "+name
+ cmLogger.trace("Create VIP with the following details :[ name : "+name
+" ip : "+ip
+" protocol : "+protocol
+" protocol_port : "+protocolPort
vip.setStatus(LBConst.STATUS_ACTIVE);
this.vips.put(name, vip);
- cmLogger.info("New VIP created : "+vip.toString());
+ cmLogger.trace("New VIP created : "+vip.toString());
return vip;
}
@Override
public VIP updateVIP(String name, String poolName){
- cmLogger.info("Updating VIP : "+name+" pool name to "+poolName);
+ cmLogger.trace("Updating VIP : "+name+" pool name to "+poolName);
if(vips.containsKey(name)){
VIP vip = vips.get(name);
@Override
public VIP deleteVIP(String name){
- cmLogger.info("Deleting VIP : "+name);
+ cmLogger.trace("Deleting VIP : "+name);
VIP vip = vips.get(name);
}
}
- cmLogger.info("VIP removed : "+vip.toString());
+ cmLogger.trace("VIP removed : "+vip.toString());
vips.remove(vip.getName());
PoolMember pm = new PoolMember(name,memberIP,poolName);
- cmLogger.info("Adding pool member : "+pm.toString());
+ cmLogger.trace("Adding pool member : "+pm.toString());
pools.get(poolName).addMember(pm);
@Override
public PoolMember removePoolMember(String name, String poolName){
- cmLogger.info("Removing pool member : {} from pool {}",name, poolName);
+ cmLogger.trace("Removing pool member : {} from pool {}",name, poolName);
Pool pool = pools.get(poolName);
pool.removeMember(name);
- cmLogger.info("Pool member {} removed from {} ",name,poolName);
+ cmLogger.trace("Pool member {} removed from {} ",name,poolName);
return pm;
}
Pool newPool = new Pool(name,lbMethod);
- cmLogger.info("New pool created : " + newPool.toString());
+ cmLogger.trace("New pool created : " + newPool.toString());
pools.put(name, newPool);
}
- cmLogger.info("Pool removed : "+pool.toString());
+ cmLogger.trace("Pool removed : "+pool.toString());
pools.remove(poolName);
* @return Details of the source machine in Client object.
*/
public Client getClientFromPacket(IPv4 inPkt){
- lbuLogger.info("Find client information from packet : {}",inPkt.toString());
+ lbuLogger.trace("Find client information from packet : {}",inPkt.toString());
String ip = NetUtils.getInetAddress(inPkt.getSourceAddress()).getHostAddress();
String protocol = IPProtocols.getProtocolName(inPkt.getProtocol());
- lbuLogger.info("client ip {} and protocl {}",ip,protocol);
+ lbuLogger.trace("client ip {} and protocol {}",ip,protocol);
Packet tpFrame= inPkt.getPayload();
- lbuLogger.info("Get protocol layer {}",tpFrame.toString());
+ lbuLogger.trace("Get protocol layer {}",tpFrame.toString());
short port = 0;
port = udpFrame.getSourcePort();
}
- lbuLogger.info("Found port {}",port);
+ lbuLogger.trace("Found port {}",port);
Client source = new Client(ip, protocol,port);
- lbuLogger.info("Client information : {}",source.toString());
+ lbuLogger.trace("Client information : {}",source.toString());
return source;
}
*/
public VIP getVIPFromPacket(IPv4 inPkt){
- lbuLogger.info("Find VIP information from packet : {}",inPkt.toString());
+ lbuLogger.trace("Find VIP information from packet : {}",inPkt.toString());
String ip = NetUtils.getInetAddress(inPkt.getDestinationAddress()).getHostAddress();
VIP dest = new VIP(null,ip, protocol,port,null);
- lbuLogger.info("VIP information : {}",dest.toString());
+ lbuLogger.trace("VIP information : {}",dest.toString());
return dest;
}
forwardPort = hnConnector.getnodeConnector();
lbsLogger
- .info("Both source (client) and destination pool machine is connected to same switch nodes. Respective ports are - {},{}",
+ .trace("Both source (client) and destination pool machine is connected to same switch nodes. Respective ports are - {},{}",
forwardPort, inPkt.getIncomingNodeConnector());
} else {
Path route = this.routing.getRoute(clientNode, destNode);
- lbsLogger.info("Path between source (client) and destination switch nodes : {}",
+ lbsLogger.trace("Path between source (client) and destination switch nodes : {}",
route.toString());
forwardPort = route.getEdges().get(0).getTailNodeConnector();
if (installLoadBalancerFlow(client, vip, clientNode, poolMemberIp,
hnConnector.getDataLayerAddressBytes(), forwardPort,
LBConst.FORWARD_DIRECTION_LB_FLOW)) {
- lbsLogger.info("Traffic from client : {} will be routed " + "to pool machine : {}",
+ lbsLogger.trace("Traffic from client : {} will be routed " + "to pool machine : {}",
client, poolMemberIp);
} else {
lbsLogger.error("Not able to route traffic from client : {}", client);
if (installLoadBalancerFlow(client, vip, clientNode, poolMemberIp, vipMacAddr,
inPkt.getIncomingNodeConnector(), LBConst.REVERSE_DIRECTION_LB_FLOW)) {
- lbsLogger.info("Flow rule installed to change the source ip/mac from "
+ lbsLogger.trace("Flow rule installed to change the source ip/mac from "
+ "pool machine ip {} to VIP {} for traffic coming pool machine", poolMemberIp,
vip);
} else {
FlowEntry fEntry = new FlowEntry(policyName, flowName, flow, sourceSwitch);
- lbsLogger.info("Install flow entry {} on node {}", fEntry.toString(), sourceSwitch.toString());
+ lbsLogger.trace("Install flow entry {} on node {}", fEntry.toString(), sourceSwitch.toString());
if (!this.ruleManager.checkFlowEntryConflict(fEntry)) {
if (this.ruleManager.installFlowEntry(fEntry).isSuccess()) {
if (props != null) {
this.containerName = (String) props.get("containerName");
- lbsLogger.info("Running container name:" + this.containerName);
+ lbsLogger.trace("Running container name:" + this.containerName);
} else {
// In the Global instance case the containerName is empty
this.containerName = "";
}
- lbsLogger.info(configManager.toString());
+ lbsLogger.trace(configManager.toString());
}
@Override
public String getPoolMemberForClient(Client source, VIP dest){
- rLogger.info("Received traffic from client : {} for VIP : {} ",source, dest);
+ rLogger.trace("Received traffic from client : {} for VIP : {} ",source, dest);
syncWithLoadBalancerData();
if(this.clientMemberMap.containsKey(source)){
pm= this.clientMemberMap.get(source);
- rLogger.info("Client {} had sent traffic before,new traffic will be routed to the same pool member {}",source,pm);
+ rLogger.trace("Client {} had sent traffic before,new traffic will be routed to the same pool member {}",source,pm);
}else{
Pool pool = null;
pool = this.cmgr.getPool(dest.getPoolName());
int memberNum = this.randomGenerator.nextInt(pool.getAllMembers().size()-1);
pm = pool.getAllMembers().get(memberNum);
this.clientMemberMap.put(source, pm );
- rLogger.info("Network traffic from client {} will be directed to pool member {}",pm);
+ rLogger.trace("Network traffic from client {} will be directed to pool member {}",source,pm);
}
return pm.getIp();
}
@Override
public String getPoolMemberForClient(Client source, VIP dest){
- rrLogger.info("Received traffic from client : {} for VIP : {} ",source, dest);
+ rrLogger.trace("Received traffic from client : {} for VIP : {} ",source, dest);
syncWithLoadBalancerData();
if(this.clientMemberMap.containsKey(source)){
pm= this.clientMemberMap.get(source);
- rrLogger.info("Client {} had sent traffic before,new traffic will be routed to the same pool member {}",source,pm);
+ rrLogger.trace("Client {} had sent traffic before,new traffic will be routed to the same pool member {}",source,pm);
}else{
Pool pool = null;
pool = this.cmgr.getPool(dest.getPoolName());
pm = pool.getAllMembers().get(memberNum);
this.clientMemberMap.put(source, pm );
- rrLogger.info("New client's packet will be directed to pool member {}",pm);
+ rrLogger.trace("New client's packet will be directed to pool member {}",pm);
memberNum++;
if(memberNum > pool.getAllMembers().size()-1){
pm = pool.getAllMembers().get(0);
this.clientMemberMap.put(source, pm);
- rrLogger.info("Network traffic from client {} will be directed to pool member {}",pm);
+ rrLogger.trace("Network traffic from client {} will be directed to pool member {}",source,pm);
this.nextItemFromPool.put(dest, 1);
rrLogger.debug("Next pool member for new client of VIP is set to {}",pool.getAllMembers().get(1));
}
private void allocateCaches() {
if (this.clusterContainerService == null) {
- log.info("un-initialized clusterContainerService, can't create cache");
+ log.trace("un-initialized clusterContainerService, can't create cache");
return;
}
@SuppressWarnings({ "unchecked" })
private void retrieveCaches() {
if (this.clusterContainerService == null) {
- log.info("un-initialized clusterContainerService, can't retrieve cache");
+ log.trace("un-initialized clusterContainerService, can't retrieve cache");
return;
}
private void destroyCaches() {
if (this.clusterContainerService == null) {
- log.info("un-initialized clusterContainerService, can't destroy cache");
+ log.trace("un-initialized clusterContainerService, can't destroy cache");
return;
}
@SuppressWarnings({ "unchecked" })
private void retrieveCaches() {
if (this.clusterContainerService == null) {
- log.info("un-initialized clusterContainerService, can't create cache");
+ log.warn("un-initialized clusterContainerService, can't create cache");
return;
}
return;
}
- log.info("Set Node {}'s Mode to {}", nodeId, cfgObject.getMode());
+ log.trace("Set Node {}'s Mode to {}", nodeId, cfgObject.getMode());
if (modeChange) {
notifyModeChange(node, cfgObject.isProactive());
rcResponse = aaaClient.authService(userName, password,
aaaServer.getAddress(), aaaServer.getSecret());
if (rcResponse.getStatus() == AuthResultEnum.AUTH_ACCEPT) {
- logger.info(
+ logger.trace(
"Remote Authentication Succeeded for User: \"{}\", by Server: {}",
userName, aaaServer.getAddress());
remotelyAuthenticated = true;
break;
} else if (rcResponse.getStatus() == AuthResultEnum.AUTH_REJECT) {
- logger.info(
+ logger.trace(
"Remote Authentication Rejected User: \"{}\", from Server: {}, Reason:{}",
new Object[] { userName, aaaServer.getAddress(),
rcResponse.getStatus().toString() });
} else {
- logger.info(
+ logger.trace(
"Remote Authentication Failed for User: \"{}\", from Server: {}, Reason:{}",
new Object[] { userName, aaaServer.getAddress(),
rcResponse.getStatus().toString() });
// Trigger cluster update
localUserConfigList.put(user, targetConfigEntry);
- logger.info("Password changed for User \"{}\"", user);
+ logger.trace("Password changed for User \"{}\"", user);
return status;
}
// TODO: if user was authenticated through AAA server, send
// Acct-Status-Type=stop message to server with logout as reason
removeUserFromActiveList(userName);
- logger.info("User \"{}\" logged out", userName);
+ logger.trace("User \"{}\" logged out", userName);
}
/*
// TODO: if user was authenticated through AAA server, send
// Acct-Status-Type=stop message to server with timeout as reason
removeUserFromActiveList(userName);
- logger.info("User \"{}\" timed out", userName);
+ logger.trace("User \"{}\" timed out", userName);
}
@Override
} else {
auditMsg = "Mode: " + mode + " User " + user + " " + action + " " + moduleName + " " + resource;
}
- logger.info(auditMsg);
+ logger.trace(auditMsg);
}
public static void auditlog(String moduleName, String user, String action, String resource) {