/*
 * Copyright (c) 2015 - 2018 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
8 package org.opendaylight.genius.utils.batching;
10 import com.google.common.base.Preconditions;
11 import com.google.common.util.concurrent.FluentFuture;
12 import com.google.common.util.concurrent.FutureCallback;
13 import com.google.common.util.concurrent.Futures;
14 import com.google.common.util.concurrent.ListenableFuture;
15 import com.google.common.util.concurrent.MoreExecutors;
16 import com.google.common.util.concurrent.SettableFuture;
17 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
18 import java.util.ArrayList;
19 import java.util.HashMap;
20 import java.util.List;
22 import java.util.Optional;
24 import java.util.concurrent.BlockingQueue;
25 import java.util.concurrent.ConcurrentHashMap;
26 import java.util.concurrent.ExecutionException;
27 import java.util.concurrent.LinkedBlockingQueue;
28 import java.util.concurrent.ScheduledExecutorService;
29 import java.util.concurrent.TimeUnit;
30 import org.apache.commons.lang3.tuple.ImmutablePair;
31 import org.apache.commons.lang3.tuple.Pair;
32 import org.eclipse.jdt.annotation.NonNull;
33 import org.opendaylight.infrautils.utils.concurrent.Executors;
34 import org.opendaylight.mdsal.binding.api.DataBroker;
35 import org.opendaylight.mdsal.binding.api.ReadTransaction;
36 import org.opendaylight.mdsal.binding.api.ReadWriteTransaction;
37 import org.opendaylight.mdsal.binding.api.WriteTransaction;
38 import org.opendaylight.mdsal.common.api.CommitInfo;
39 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
40 import org.opendaylight.mdsal.common.api.ReadFailedException;
41 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
42 import org.opendaylight.yangtools.yang.binding.DataObject;
43 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
44 import org.slf4j.Logger;
45 import org.slf4j.LoggerFactory;
/**
 * This class lets other modules submit their CRUD methods to it. This class
 * will then supply a single transaction to such CRUD methods of the
 * subscribers, on which such subscribers write data to that transaction.
 * Finally, the framework attempts to reliably write this single transaction —
 * which represents a batch of an ordered list of entities owned by that subscriber —
 * to be written/updated/removed from a specific datastore as registered by the subscriber.
 */
55 public class ResourceBatchingManager implements AutoCloseable {
57 private static final Logger LOG = LoggerFactory.getLogger(ResourceBatchingManager.class);
    // NOTE(review): INITIAL_DELAY is not referenced anywhere in the visible portion
    // of this file; presumably used by a scheduling call elsewhere — confirm.
59 private static final int INITIAL_DELAY = 3000;
    // Unit applied to the batch-interval values when scheduling Batcher runs.
60 private static final TimeUnit TIME_UNIT = TimeUnit.MILLISECONDS;
    // Defaults for the default batch handlers; overridable via the system properties
    // read in registerDefaultBatchHandlers().
62 private static final int PERIODICITY_IN_MS = 500;
63 private static final int BATCH_SIZE = 1000;
65 public enum ShardResource {
66 CONFIG_TOPOLOGY(LogicalDatastoreType.CONFIGURATION),
67 OPERATIONAL_TOPOLOGY(LogicalDatastoreType.OPERATIONAL),
68 CONFIG_INVENTORY(LogicalDatastoreType.CONFIGURATION),
69 OPERATIONAL_INVENTORY(LogicalDatastoreType.OPERATIONAL);
71 BlockingQueue<ActionableResource> queue = new LinkedBlockingQueue<>();
72 LogicalDatastoreType datastoreType;
74 ShardResource(LogicalDatastoreType datastoreType) {
75 this.datastoreType = datastoreType;
78 public LogicalDatastoreType getDatastoreType() {
82 BlockingQueue<ActionableResource> getQueue() {
    // resourceType -> (queue of pending operations, handler that applies them).
87 private final ConcurrentHashMap<String, Pair<BlockingQueue<ActionableResource>, ResourceHandler>>
88 resourceHandlerMapper = new ConcurrentHashMap<>();
    // resourceType -> single-threaded scheduler running that type's Batcher task.
90 private final ConcurrentHashMap<String, ScheduledExecutorService>
91 resourceBatchingThreadMapper = new ConcurrentHashMap<>();
    // resourceType -> identifiers with queued-but-not-yet-committed writes; read()
    // consults this to decide whether a read must be ordered through the queue.
93 private final Map<String, Set<InstanceIdentifier<?>>> pendingModificationByResourceType = new ConcurrentHashMap<>();
95 private static ResourceBatchingManager instance;
98 instance = new ResourceBatchingManager();
101 public static ResourceBatchingManager getInstance() {
106 public void close() {
107 LOG.trace("ResourceBatchingManager Closed, closing all batched resources");
108 resourceBatchingThreadMapper.values().forEach(ScheduledExecutorService::shutdown);
111 public void registerBatchableResource(
112 String resourceType, final BlockingQueue<ActionableResource> resQueue, final ResourceHandler resHandler) {
113 Preconditions.checkNotNull(resQueue, "ResourceQueue to use for batching cannot not be null.");
114 Preconditions.checkNotNull(resHandler, "ResourceHandler cannot not be null.");
116 resourceHandlerMapper.put(resourceType, new ImmutablePair<>(resQueue, resHandler));
117 ScheduledExecutorService resDelegatorService =
118 Executors.newListeningScheduledThreadPool(1, "ResourceBatchingManager", LOG);
119 resourceBatchingThreadMapper.put(resourceType, resDelegatorService);
120 LOG.info("Registered resourceType {} with batchSize {} and batchInterval {}", resourceType,
121 resHandler.getBatchSize(), resHandler.getBatchInterval());
122 resDelegatorService.scheduleWithFixedDelay(
123 new Batcher(resourceType), resHandler.getBatchInterval(), resHandler.getBatchInterval(), TIME_UNIT);
124 pendingModificationByResourceType.putIfAbsent(resourceType, ConcurrentHashMap.newKeySet());
127 public void registerDefaultBatchHandlers(DataBroker broker) {
128 LOG.trace("Registering default batch handlers");
129 Integer batchSize = Integer.getInteger("resource.manager.batch.size", BATCH_SIZE);
130 Integer batchInterval = Integer.getInteger("resource.manager.batch.periodicity.ms", PERIODICITY_IN_MS);
132 for (ShardResource shardResource : ShardResource.values()) {
133 if (resourceHandlerMapper.containsKey(shardResource.name())) {
136 DefaultBatchHandler batchHandler = new DefaultBatchHandler(broker, shardResource.datastoreType, batchSize,
138 registerBatchableResource(shardResource.name(), shardResource.getQueue(), batchHandler);
142 private void beforeModification(String resoureType, InstanceIdentifier<?> iid) {
143 pendingModificationByResourceType.get(resoureType).add(iid);
146 @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
147 justification = "https://github.com/spotbugs/spotbugs/issues/811")
148 private void afterModification(String resoureType, InstanceIdentifier<?> iid) {
149 pendingModificationByResourceType.get(resoureType).remove(iid);
153 * Reads the identifier of the given resource type.
154 * Not to be used by the applications which uses their own resource queue
156 * @param resourceType resource type that was registered with batch manager
157 * @param identifier identifier to be read
158 * @return a CheckFuture containing the result of the read
160 public <T extends DataObject> FluentFuture<Optional<T>> read(
161 String resourceType, InstanceIdentifier<T> identifier) {
162 BlockingQueue<ActionableResource> queue = getQueue(resourceType);
164 if (pendingModificationByResourceType.get(resourceType).contains(identifier)) {
165 SettableFuture<Optional<T>> readFuture = SettableFuture.create();
166 queue.add(new ActionableReadResource<>(identifier, readFuture));
167 return FluentFuture.from(Futures.makeChecked(readFuture, ReadFailedException.MAPPER));
169 ResourceHandler resourceHandler = resourceHandlerMapper.get(resourceType).getRight();
170 try (ReadTransaction tx = resourceHandler.getResourceBroker().newReadOnlyTransaction()) {
171 return tx.read(resourceHandler.getDatastoreType(), identifier);
176 return FluentFutures.immediateFailedFluentFuture(new ReadFailedException(
177 "No batch handler was registered for resource " + resourceType));
180 public ListenableFuture<Void> merge(ShardResource shardResource, InstanceIdentifier<?> identifier,
181 DataObject updatedData) {
182 BlockingQueue<ActionableResource> queue = shardResource.getQueue();
184 beforeModification(shardResource.name(), identifier);
185 ActionableResource actResource = new ActionableResourceImpl(
186 identifier, ActionableResource.UPDATE, updatedData, null/*oldData*/);
187 queue.add(actResource);
188 return actResource.getResultFuture();
191 .immediateFailedFuture(new IllegalStateException("Queue missing for provided shardResource "
192 + shardResource.name()));
195 public void merge(String resourceType, InstanceIdentifier<?> identifier, DataObject updatedData) {
196 BlockingQueue<ActionableResource> queue = getQueue(resourceType);
198 beforeModification(resourceType, identifier);
199 ActionableResource actResource = new ActionableResourceImpl(
200 identifier, ActionableResource.UPDATE, updatedData, null/*oldData*/);
201 queue.add(actResource);
205 public ListenableFuture<Void> delete(ShardResource shardResource, InstanceIdentifier<?> identifier) {
206 BlockingQueue<ActionableResource> queue = shardResource.getQueue();
208 beforeModification(shardResource.name(), identifier);
209 ActionableResource actResource = new ActionableResourceImpl(
210 identifier, ActionableResource.DELETE, null, null/*oldData*/);
211 queue.add(actResource);
212 return actResource.getResultFuture();
215 .immediateFailedFuture(new IllegalStateException("Queue missing for provided shardResource "
216 + shardResource.name()));
219 public void delete(String resourceType, InstanceIdentifier<?> identifier) {
220 BlockingQueue<ActionableResource> queue = getQueue(resourceType);
222 beforeModification(resourceType, identifier);
223 ActionableResource actResource = new ActionableResourceImpl(
224 identifier, ActionableResource.DELETE, null, null/*oldData*/);
225 queue.add(actResource);
229 public ListenableFuture<Void> put(ShardResource shardResource, InstanceIdentifier<?> identifier,
230 DataObject updatedData) {
231 BlockingQueue<ActionableResource> queue = shardResource.getQueue();
233 beforeModification(shardResource.name(), identifier);
234 ActionableResource actResource = new ActionableResourceImpl(
235 identifier, ActionableResource.CREATE, updatedData, null/*oldData*/);
236 queue.add(actResource);
237 return actResource.getResultFuture();
240 .immediateFailedFuture(new IllegalStateException("Queue missing for provided shardResource "
241 + shardResource.name()));
244 public void put(String resourceType, InstanceIdentifier<?> identifier, DataObject updatedData) {
245 BlockingQueue<ActionableResource> queue = getQueue(resourceType);
247 beforeModification(resourceType, identifier);
248 ActionableResource actResource = new ActionableResourceImpl(
249 identifier, ActionableResource.CREATE, updatedData, null/*oldData*/);
250 queue.add(actResource);
254 private BlockingQueue<ActionableResource> getQueue(String resourceType) {
255 if (resourceHandlerMapper.containsKey(resourceType)) {
256 return resourceHandlerMapper.get(resourceType).getLeft();
261 public void deregisterBatchableResource(String resourceType) {
262 ScheduledExecutorService scheduledThreadPoolExecutor = resourceBatchingThreadMapper.get(resourceType);
263 if (scheduledThreadPoolExecutor != null) {
264 scheduledThreadPoolExecutor.shutdown();
266 resourceHandlerMapper.remove(resourceType);
267 resourceBatchingThreadMapper.remove(resourceType);
270 private class Batcher implements Runnable {
271 private final String resourceType;
273 Batcher(String resourceType) {
274 this.resourceType = resourceType;
279 List<ActionableResource> resList = new ArrayList<>();
282 Pair<BlockingQueue<ActionableResource>, ResourceHandler> resMapper =
283 resourceHandlerMapper.get(resourceType);
284 if (resMapper == null) {
285 LOG.error("Unable to find resourceMapper for batching the ResourceType {}", resourceType);
288 BlockingQueue<ActionableResource> resQueue = resMapper.getLeft();
289 ResourceHandler resHandler = resMapper.getRight();
290 resList.add(resQueue.take());
291 resQueue.drainTo(resList);
293 long start = System.currentTimeMillis();
294 int batchSize = resHandler.getBatchSize();
296 int batches = resList.size() / batchSize;
297 if (resList.size() > batchSize) {
298 LOG.info("Batched up resources of size {} into batches {} for resourcetype {}",
299 resList.size(), batches, resourceType);
300 for (int i = 0, j = 0; i < batches; j = j + batchSize,i++) {
301 new MdsalDsTask<>(resourceType, resList.subList(j, j + batchSize)).process();
303 // process remaining routes
304 LOG.trace("Picked up 1 size {} ", resList.subList(batches * batchSize, resList.size()).size());
305 new MdsalDsTask<>(resourceType, resList.subList(batches * batchSize, resList.size())).process();
307 // process less than OR == batchsize routes
308 LOG.trace("Picked up 2 size {}", resList.size());
309 new MdsalDsTask<>(resourceType, resList).process();
312 long timetaken = System.currentTimeMillis() - start;
313 LOG.debug("Total taken ##time = {}ms for resourceList of size {} for resourceType {}",
314 timetaken, resList.size(), resourceType);
316 } catch (InterruptedException e) {
317 LOG.error("InterruptedException during run()", e);
    // Applies one batch of ActionableResources to the datastore in a single
    // read-write transaction; on commit failure, process() retries each
    // sub-transaction individually (see below).
323 private class MdsalDsTask<T extends DataObject> {
    // Operations making up this batch. NOTE(review): the declaration of the
    // resourceType field assigned by the constructor is not visible in this
    // view of the file — confirm against the full source.
325 List<ActionableResource> actResourceList;
327 MdsalDsTask(String resourceType, List<ActionableResource> actResourceList) {
328 this.resourceType = resourceType;
329 this.actResourceList = actResourceList;
        // Applies the whole batch in one read-write transaction. Each handler call
        // appends SubTransaction records to transactionObjects; these are replayed
        // one-by-one if the combined commit fails.
332 @SuppressWarnings("unchecked")
333 public void process() {
334 LOG.trace("Picked up 3 size {} of resourceType {}", actResourceList.size(), resourceType);
335 Pair<BlockingQueue<ActionableResource>, ResourceHandler> resMapper =
336 resourceHandlerMapper.get(resourceType);
337 if (resMapper == null) {
338 LOG.error("Unable to find resourceMapper for batching the ResourceType {}", resourceType);
341 ResourceHandler resHandler = resMapper.getRight();
342 DataBroker broker = resHandler.getResourceBroker();
343 LogicalDatastoreType dsType = resHandler.getDatastoreType();
344 ReadWriteTransaction tx = broker.newReadWriteTransaction();
        // Per-op SubTransactions recorded by the handler, and a map from each op's
        // last SubTransaction to its caller-visible result future.
345 List<SubTransaction> transactionObjects = new ArrayList<>();
346 Map<SubTransaction, SettableFuture<Void>> txMap = new HashMap<>();
347 for (ActionableResource actResource : actResourceList) {
348 int startSize = transactionObjects.size();
        // Dispatch each queued operation onto the shared transaction.
349 switch (actResource.getAction()) {
350 case ActionableResource.CREATE:
351 resHandler.create(tx, dsType, actResource.getInstanceIdentifier(), actResource.getInstance(),
354 case ActionableResource.UPDATE:
355 Object updated = actResource.getInstance();
356 Object original = actResource.getOldInstance();
357 resHandler.update(tx, dsType, actResource.getInstanceIdentifier(), original,
358 updated,transactionObjects);
360 case ActionableResource.UPDATECONTAINER:
361 Object updatedContainer = actResource.getInstance();
362 Object originalContainer = actResource.getOldInstance();
363 resHandler.updateContainer(tx, dsType, actResource.getInstanceIdentifier(),
364 originalContainer, updatedContainer,transactionObjects);
366 case ActionableResource.DELETE:
367 resHandler.delete(tx, dsType, actResource.getInstanceIdentifier(), actResource.getInstance(),
        // READ entries are reads that were queued behind pending writes (see
        // read()); resolve them against this transaction so they observe the
        // batched writes.
370 case ActionableResource.READ:
371 ActionableReadResource<DataObject> readAction = (ActionableReadResource<DataObject>)actResource;
372 ListenableFuture<Optional<DataObject>> future =
373 tx.read(dsType, readAction.getInstanceIdentifier());
374 Futures.addCallback(future, new FutureCallback<Optional<DataObject>>() {
376 public void onSuccess(Optional<DataObject> result) {
377 readAction.getReadFuture().set(result);
381 public void onFailure(Throwable failure) {
382 readAction.getReadFuture().setException(failure);
384 }, MoreExecutors.directExecutor());
387 LOG.error("Unable to determine Action for ResourceType {} with ResourceKey {}",
388 resourceType, actResource);
        // If the handler recorded SubTransactions for this op, associate the op's
        // result future with the last one so the fallback path can complete it.
390 int endSize = transactionObjects.size();
391 if (endSize > startSize) {
392 txMap.put(transactionObjects.get(endSize - 1),
393 (SettableFuture<Void>) actResource.getResultFuture());
        // Commit the combined batch and, on success, complete every result future
        // and clear the pending-modification markers.
398 long start = System.currentTimeMillis();
399 FluentFuture<? extends @NonNull CommitInfo> futures = tx.commit();
403 actResourceList.forEach(actionableResource -> {
404 ((SettableFuture<Void>) actionableResource.getResultFuture()).set(null);
405 postCommit(actionableResource.getAction(), actionableResource.getInstanceIdentifier());
407 long time = System.currentTimeMillis() - start;
408 LOG.trace("##### Time taken for {} = {}ms", actResourceList.size(), time);
        // Fallback: the batch commit failed — replay each recorded SubTransaction
        // in its own write-only transaction so one bad entity cannot sink the rest.
        // NOTE(review): InterruptedException is handled without re-interrupting the
        // thread here — consider Thread.currentThread().interrupt(); confirm intent.
410 } catch (InterruptedException | ExecutionException e) {
411 LOG.error("Exception occurred while batch writing to datastore", e);
412 LOG.info("Trying to submit transaction operations one at a time for resType {}", resourceType);
413 for (SubTransaction object : transactionObjects) {
414 WriteTransaction writeTransaction = broker.newWriteOnlyTransaction();
415 switch (object.getAction()) {
416 case SubTransaction.CREATE:
417 writeTransaction.put(dsType, object.getInstanceIdentifier(),
418 (DataObject) object.getInstance(), true);
420 case SubTransaction.DELETE:
421 writeTransaction.delete(dsType, object.getInstanceIdentifier());
423 case SubTransaction.UPDATE:
424 writeTransaction.merge(dsType, object.getInstanceIdentifier(),
425 (DataObject) object.getInstance(), true);
428 LOG.error("Unable to determine Action for transaction object with id {}",
429 object.getInstanceIdentifier());
        // Synchronously commit this single operation, then complete (or fail) the
        // associated result future, if one was recorded.
431 FluentFuture<? extends @NonNull CommitInfo> futureOperation = writeTransaction.commit();
433 futureOperation.get();
434 if (txMap.containsKey(object)) {
435 txMap.get(object).set(null);
437 LOG.error("Subtx object {} has no Actionable-resource associated with it !! ",
438 object.getInstanceIdentifier());
440 } catch (InterruptedException | ExecutionException exception) {
441 if (txMap.containsKey(object)) {
442 txMap.get(object).setException(exception);
444 LOG.error("Error {} to datastore (path, data) : ({}, {})", object.getAction(),
445 object.getInstanceIdentifier(), object.getInstance(), exception);
        // Runs for every SubTransaction, success or failure, to clear markers.
447 postCommit(object.getAction(), object.getInstanceIdentifier());
453 private void postCommit(int action, InstanceIdentifier iid) {
455 case ActionableResource.CREATE:
456 case ActionableResource.UPDATE:
457 case ActionableResource.DELETE:
458 afterModification(resourceType, iid);
466 private static class ActionableReadResource<T extends DataObject> extends ActionableResourceImpl {
467 private final SettableFuture<Optional<T>> readFuture;
469 ActionableReadResource(InstanceIdentifier<T> identifier, SettableFuture<Optional<T>> readFuture) {
470 super(identifier, ActionableResource.READ, null, null);
471 this.readFuture = readFuture;
474 SettableFuture<Optional<T>> getReadFuture() {