import org.opendaylight.yangtools.yang.data.util.codec.SharedCodecCache;
import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.TypedDataSchemaNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*/
RFC7951() {
@Override
- JSONCodecFactory createFactory(final SchemaContext context, final CodecCache<JSONCodec<?>> cache) {
+ JSONCodecFactory createFactory(final EffectiveModelContext context, final CodecCache<JSONCodec<?>> cache) {
return new RFC7951JSONCodecFactory(context, cache);
}
},
*/
DRAFT_LHOTKA_NETMOD_YANG_JSON_02() {
@Override
- JSONCodecFactory createFactory(final SchemaContext context, final CodecCache<JSONCodec<?>> cache) {
+ JSONCodecFactory createFactory(final EffectiveModelContext context, final CodecCache<JSONCodec<?>> cache) {
return new Lhotka02JSONCodecFactory(context, cache);
}
};
private static final Logger LOG = LoggerFactory.getLogger(JSONCodecFactorySupplier.class);
- private static final class EagerCacheLoader extends CacheLoader<SchemaContext, JSONCodecFactory> {
- private final BiFunction<SchemaContext, CodecCache<JSONCodec<?>>, JSONCodecFactory> factorySupplier;
+ private static final class EagerCacheLoader extends CacheLoader<EffectiveModelContext, JSONCodecFactory> {
+ private final BiFunction<EffectiveModelContext, CodecCache<JSONCodec<?>>, JSONCodecFactory> factorySupplier;
- EagerCacheLoader(final BiFunction<SchemaContext, CodecCache<JSONCodec<?>>, JSONCodecFactory> factorySupplier) {
+ EagerCacheLoader(final BiFunction<EffectiveModelContext,
+ CodecCache<JSONCodec<?>>, JSONCodecFactory> factorySupplier) {
this.factorySupplier = requireNonNull(factorySupplier);
}
@Override
- public JSONCodecFactory load(final SchemaContext key) {
+ public JSONCodecFactory load(final EffectiveModelContext key) {
final Stopwatch sw = Stopwatch.createStarted();
final LazyCodecCache<JSONCodec<?>> lazyCache = new LazyCodecCache<>();
final JSONCodecFactory lazy = factorySupplier.apply(key, lazyCache);
}
- // Weak keys to retire the entry when SchemaContext goes away
+ // Weak keys to retire the entry when EffectiveModelContext goes away
- private final LoadingCache<SchemaContext, JSONCodecFactory> precomputed;
+ private final LoadingCache<EffectiveModelContext, JSONCodecFactory> precomputed;
- // Weak keys to retire the entry when SchemaContext goes away and to force identity-based lookup
+ // Weak keys to retire the entry when EffectiveModelContext goes away and to force identity-based lookup
- private final LoadingCache<SchemaContext, JSONCodecFactory> shared;
+ private final LoadingCache<EffectiveModelContext, JSONCodecFactory> shared;
JSONCodecFactorySupplier() {
precomputed = CacheBuilder.newBuilder().weakKeys().build(new EagerCacheLoader(this::createFactory));
- shared = CacheBuilder.newBuilder().weakKeys().build(new CacheLoader<SchemaContext, JSONCodecFactory>() {
+ shared = CacheBuilder.newBuilder().weakKeys().build(new CacheLoader<EffectiveModelContext, JSONCodecFactory>() {
@Override
- public JSONCodecFactory load(final SchemaContext key) {
+ public JSONCodecFactory load(final EffectiveModelContext key) {
return createFactory(key, new SharedCodecCache<>());
}
});
* <p>
* Choosing this implementation is appropriate when the memory overhead of keeping a full codec tree is not as
* great a concern as predictable performance. When compared to the implementation returned by
- * {@link #getShared(SchemaContext)}, this implementation is expected to offer higher performance and have lower
- * peak memory footprint when most of the SchemaContext is actually in use.
+ * {@link #getShared(EffectiveModelContext)}, this implementation is expected to offer higher performance and have
+ * lower peak memory footprint when most of the EffectiveModelContext is actually in use.
*
* <p>
* For call sites which do not want to pay the CPU cost of pre-computing this implementation, but still would like
* to use it if is available (by being populated by some other caller), you can use
- * {@link #getPrecomputedIfAvailable(SchemaContext)}.
+ * {@link #getPrecomputedIfAvailable(EffectiveModelContext)}.
*
- * @param context SchemaContext instance
+ * @param context EffectiveModelContext instance
* @return A sharable {@link JSONCodecFactory}
* @throws NullPointerException if context is null
*/
- public @NonNull JSONCodecFactory getPrecomputed(final @NonNull SchemaContext context) {
+ public @NonNull JSONCodecFactory getPrecomputed(final @NonNull EffectiveModelContext context) {
return verifyNotNull(precomputed.getUnchecked(context));
}
/**
- * Get a thread-safe, eagerly-caching {@link JSONCodecFactory} for a SchemaContext, if it is available. This
+ * Get a thread-safe, eagerly-caching {@link JSONCodecFactory} for an EffectiveModelContext, if it is available. This
- * method is a non-blocking equivalent of {@link #getPrecomputed(SchemaContext)} for use in code paths where
+ * method is a non-blocking equivalent of {@link #getPrecomputed(EffectiveModelContext)} for use in code paths where
* the potential of having to pre-compute the implementation is not acceptable. One such scenario is when the
* code base wants to opportunistically take advantage of pre-computed version, but is okay with a fallback to
* a different implementation.
* @return A sharable {@link JSONCodecFactory}, or absent if such an implementation is not available.
* @throws NullPointerException if context is null
*/
- public @NonNull Optional<JSONCodecFactory> getPrecomputedIfAvailable(final @NonNull SchemaContext context) {
+ public @NonNull Optional<JSONCodecFactory> getPrecomputedIfAvailable(final @NonNull EffectiveModelContext context) {
return Optional.ofNullable(precomputed.getIfPresent(context));
}
/**
- * Get a thread-safe, lazily-caching {@link JSONCodecFactory} for a SchemaContext. This method can, and will,
+ * Get a thread-safe, lazily-caching {@link JSONCodecFactory} for an EffectiveModelContext. This method can, and will,
- * return the same instance as long as the associated SchemaContext is present or the factory is not invalidated
- * by memory pressure. Returned object can be safely used by multiple threads concurrently.
+ * return the same instance as long as the associated EffectiveModelContext is present or the factory is not
+ * invalidated by memory pressure. Returned object can be safely used by multiple threads concurrently.
*
* <p>
* Choosing this implementation is a safe default, as it will not incur prohibitive blocking, nor will it tie up
* @return A sharable {@link JSONCodecFactory}
* @throws NullPointerException if context is null
*/
- public @NonNull JSONCodecFactory getShared(final @NonNull SchemaContext context) {
+ public @NonNull JSONCodecFactory getShared(final @NonNull EffectiveModelContext context) {
return verifyNotNull(shared.getUnchecked(context));
}
* @return A non-sharable {@link JSONCodecFactory}
* @throws NullPointerException if context is null
*/
- public @NonNull JSONCodecFactory createLazy(final @NonNull SchemaContext context) {
+ public @NonNull JSONCodecFactory createLazy(final @NonNull EffectiveModelContext context) {
return createFactory(context, new LazyCodecCache<>());
}
/**
- * Create a simplistic, thread-safe {@link JSONCodecFactory} for a {@link SchemaContext}. This method will return
- * distinct objects every time it is invoked. Returned object may be use from multiple threads concurrently.
+ * Create a simplistic, thread-safe {@link JSONCodecFactory} for an {@link EffectiveModelContext}. This method will
+ * return distinct objects every time it is invoked. Returned object may be use from multiple threads concurrently.
*
* <p>
* This implementation exists mostly for completeness only, as it does not perform any caching at all and each codec
* @return A non-sharable {@link JSONCodecFactory}
* @throws NullPointerException if context is null.
*/
- public @NonNull JSONCodecFactory createSimple(final @NonNull SchemaContext context) {
+ public @NonNull JSONCodecFactory createSimple(final @NonNull EffectiveModelContext context) {
return createFactory(context, NoopCodecCache.getInstance());
}
- abstract @NonNull JSONCodecFactory createFactory(SchemaContext context, CodecCache<JSONCodec<?>> cache);
+ abstract @NonNull JSONCodecFactory createFactory(EffectiveModelContext context, CodecCache<JSONCodec<?>> cache);
}