Bug Summary

File: /home/sharpd/frr3/lib/northbound_oper.c
Warning: line 1617, column 21
Null pointer passed as 1st argument to string copy function
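
The flagged line (1617) lies beyond the excerpt shown below. The warning class itself is simple: strlcpy() is modeled as requiring a non-null destination, so the analyzer reports any execution path on which the destination pointer can still be NULL when the copy runs. A minimal, hypothetical sketch of that shape (editor-added, not the code at line 1617):

    /* Editor-added illustration of the warning class; not FRR code. */
    #include <stdlib.h>
    #include <string.h>     /* strlcpy() per BSD/libbsd; FRR carries a compat version */

    static void copy_example(const char *src, size_t needed)
    {
            char *dst = NULL;
            size_t cap = 0;

            if (needed > cap) {     /* the analyzer also explores the path that skips this */
                    dst = malloc(needed);
                    cap = needed;
            }
            strlcpy(dst, src, cap); /* <- "Null pointer passed as 1st argument" */
    }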

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name northbound_oper.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/sharpd/frr3 -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D HAVE_CONFIG_H -D SYSCONFDIR="/etc/frr/" -D CONFDATE=20240105 -I . -I ./lib/assert -I . -I ./include -I ./lib -I . -I /usr/include/lua5.3 -I /usr/include/x86_64-linux-gnu -D PIC -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/12/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O0 -Wwrite-strings -Wno-unused-result -Wno-unused-parameter -Wno-missing-field-initializers -Wno-microsoft-anon-tag -fconst-strings -fdebug-compilation-dir=/home/sharpd/frr3 -ferror-limit 19 -fms-extensions -fgnuc-version=4.2.1 -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2024-01-05-120749-780821-1 -x c lib/northbound_oper.c
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * October 14 2023, Christian Hopps <chopps@labn.net>
4 *
5 * Copyright (C) 2018 NetDEF, Inc.
6 * Renato Westphal
7 * Copyright (c) 2023, LabN Consulting, L.L.C.
8 *
9 */
10
11#include <zebra.h>
12#include "darr.h"
13#include "debug.h"
14#include "frrevent.h"
15#include "frrstr.h"
16#include "lib_errors.h"
17#include "monotime.h"
18#include "northbound.h"
19
20/*
21 * YANG model yielding design restrictions:
22 *
23 * In order to be able to yield and guarantee we have a valid data tree at the
24 * point of yielding we must know that each parent has all it's siblings
25 * collected to represent a complete element.
26 *
27 * Basically, there should be a only single branch in the schema tree that
28 * supports yielding. In practice this means:
29 *
30 * list node schema with lookup next:
31 * - must not have any lookup-next list node sibling schema
32 * - must not have any list or container node siblings with lookup-next descendants.
33 * - any parent list nodes must also be lookup-next list nodes
34 *
35 * We must also process containers with lookup-next descendants last.
36 */
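    /*
     * Editor-added illustration (hypothetical node names, not part of this
     * file) of the restriction above:
     *
     *   Can yield -- a single lookup-next branch:
     *     container instances
     *       list instance        (get_next + lookup_next)
     *         container ribs
     *           list rib         (get_next + lookup_next)
     *
     *   Cannot yield -- two sibling lists with lookup-next, or a lookup-next
     *   list whose parent list lacks lookup_next(): after yielding there is
     *   no reliable way to re-locate the partially walked parent entry, so a
     *   valid data tree could not be guaranteed.
     */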
37
38DEFINE_MTYPE_STATIC(LIB, NB_YIELD_STATE, "NB Yield State");
39DEFINE_MTYPE_STATIC(LIB, NB_NODE_INFOS, "NB Node Infos");
40
41/* Amount of time allowed to spend constructing oper-state prior to yielding */
42#define NB_OP_WALK_INTERVAL_MS 50
43#define NB_OP_WALK_INTERVAL_US (NB_OP_WALK_INTERVAL_MS * 1000)
44
45/* ---------- */
46/* Data Types */
47/* ---------- */
48PREDECL_LIST(nb_op_walks);
49
50/*
51 * This is our information about a node on the branch we are looking at
52 */
53struct nb_op_node_info {
54 struct lyd_node_inner *inner;
55 const struct lysc_node *schema; /* inner schema in case we rm inner */
56 struct yang_list_keys keys; /* if list, keys to locate element */
57 const void *list_entry; /* opaque entry from user or NULL */
58 uint xpath_len; /* length of the xpath string for this node */
59 uint niters; /* # list elems create this iteration */
60 uint nents; /* # list elems create so far */
61 bool query_specific_entry : 1; /* this info is specific specified */
62 bool has_lookup_next : 1; /* if this node support lookup next */
63 bool lookup_next_ok : 1; /* if this and all previous support */
64};
65
66/**
67 * struct nb_op_yield_state - tracking required state for yielding.
68 *
69 * @xpath: current xpath representing the node_info stack.
70 * @xpath_orig: the original query string from the user
71 * @node_infos: the container stack for the walk from root to current
72 * @schema_path: the schema nodes for each node in the query string.
73 # @query_tokstr: the query string tokenized with NUL bytes.
74 * @query_tokens: the string pointers to each query token (node).
75 * @walk_root_level: The topmost specific node, +1 is where we start walking.
76 * @walk_start_level: @walk_root_level + 1.
77 * @query_base_level: the level the query string stops at and full walks
78 * commence below that.
79 */
80struct nb_op_yield_state {
81 /* Walking state */
82 char *xpath;
83 char *xpath_orig;
84 struct nb_op_node_info *node_infos;
85 const struct lysc_node **schema_path;
86 char *query_tokstr;
87 char **query_tokens;
88 int walk_root_level;
89 int walk_start_level;
90 int query_base_level;
91 bool query_list_entry; /* XXX query was for a specific list entry */
92
93 /* Yielding state */
94 bool query_did_entry; /* currently processing the entry */
95 bool should_batch;
96 struct timeval start_time;
97 struct yang_translator *translator;
98 uint32_t flags;
99 nb_oper_data_cb cb;
100 void *cb_arg;
101 nb_oper_data_finish_cb finish;
102 void *finish_arg;
103 struct event *walk_ev;
104 struct nb_op_walks_item link;
105};
106
107DECLARE_LIST(nb_op_walks, struct nb_op_yield_state, link);
108
109/* ---------------- */
110/* Global Variables */
111/* ---------------- */
112
113static struct event_loop *event_loop;
114static struct nb_op_walks_head nb_op_walks;
115
116/* --------------------- */
117/* Function Declarations */
118/* --------------------- */
119
120static enum nb_error nb_op_yield(struct nb_op_yield_state *ys);
121static struct lyd_node *ys_root_node(struct nb_op_yield_state *ys);
122
123/* -------------------- */
124/* Function Definitions */
125/* -------------------- */
126
127static inline struct nb_op_yield_state *
128nb_op_create_yield_state(const char *xpath, struct yang_translator *translator,
129 uint32_t flags, bool should_batch, nb_oper_data_cb cb,
130 void *cb_arg, nb_oper_data_finish_cb finish,
131 void *finish_arg)
132{
133 struct nb_op_yield_state *ys;
134
135 ys = XCALLOC(MTYPE_NB_YIELD_STATE, sizeof(*ys));
136 ys->xpath = darr_strdup_cap(xpath, (size_t)XPATH_MAXLEN);
137 ys->xpath_orig = darr_strdup(xpath);
138 ys->translator = translator;
139 ys->flags = flags;
140 ys->should_batch = should_batch;
141 ys->cb = cb;
142 ys->cb_arg = cb_arg;
143 ys->finish = finish;
144 ys->finish_arg = finish_arg;
145
146 nb_op_walks_add_tail(&nb_op_walks, ys);
147
148 return ys;
149}
150
151static inline void nb_op_free_yield_state(struct nb_op_yield_state *ys,
152 bool nofree_tree)
153{
154 if (ys) {
155 EVENT_OFF(ys->walk_ev);
156 nb_op_walks_del(&nb_op_walks, ys);
157 /* if we have a branch then free up it's libyang tree */
158 if (!nofree_tree && ys_root_node(ys))
159 lyd_free_all(ys_root_node(ys));
160 darr_free(ys->query_tokens);
161 darr_free(ys->query_tokstr);
162 darr_free(ys->schema_path);
163 darr_free(ys->node_infos);
164 darr_free(ys->xpath_orig);
165 darr_free(ys->xpath);
166 XFREE(MTYPE_NB_YIELD_STATE, ys);
167 }
168}
169
170static const struct lysc_node *ys_get_walk_stem_tip(struct nb_op_yield_state *ys)
171{
172 if (ys->walk_start_level <= 0)
173 return NULL;
174 return ys->node_infos[ys->walk_start_level - 1].schema;
175}
176
177static struct lyd_node *ys_root_node(struct nb_op_yield_state *ys)
178{
179 if (!darr_len(ys->node_infos))
180 return NULL;
181 return &ys->node_infos[0].inner->node;
182}
183
184static void ys_trim_xpath(struct nb_op_yield_state *ys)
185{
186 uint len = darr_len(ys->node_infos);
187
188 if (len == 0)
189 darr_setlen(ys->xpath, 1);
190 else
191 darr_setlen(ys->xpath, darr_last(ys->node_infos)->xpath_len + 1);
192 ys->xpath[darr_len(ys->xpath) - 1] = 0;
193}
194
195static void ys_pop_inner(struct nb_op_yield_state *ys)
196{
197 uint len = darr_len(ys->node_infos);
198
199 assert(len);
200 darr_setlen(ys->node_infos, len - 1);
201 ys_trim_xpath(ys);
202}
203
204static void nb_op_get_keys(struct lyd_node_inner *list_node,
205 struct yang_list_keys *keys)
206{
207 struct lyd_node *child;
208 uint n = 0;
209
210 keys->num = 0;
211 LY_LIST_FOR (list_node->child, child) {
212 if (!lysc_is_key(child->schema))
213 break;
214 strlcpy(keys->key[n], yang_dnode_get_string(child, NULL),
215         sizeof(keys->key[n]));
216 n++;
217 }
218
219 keys->num = n;
220}
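    /* Editor-added usage sketch (hypothetical path and key value, not part
     * of this file): for a list element created from
     * "/frr-interface:lib/interface[name='eth0']", the loop above copies the
     * key leafs in schema order, so afterwards keys.num == 1 and keys.key[0]
     * holds "eth0".
     *
     *   struct yang_list_keys keys;
     *
     *   nb_op_get_keys(list_node, &keys);
     */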
221
222/**
223 * __move_back_to_next() - move back to the next lookup-next schema
224 */
225static bool __move_back_to_next(struct nb_op_yield_state *ys, int i)
226{
227 struct nb_op_node_info *ni;
228 int j;
229
230 /*
231 * We will free the subtree we are trimming back to, or we will be done
232 * with the walk and will free the root on cleanup.
233 */
234
235 /* pop any node_info we dropped below on entry */
236 for (j = darr_ilen(ys->node_infos) - 1; j > i; j--)
237 ys_pop_inner(ys);
238
239 for (; i >= ys->walk_root_level; i--) {
240 if (ys->node_infos[i].has_lookup_next)
241 break;
242 ys_pop_inner(ys);
243 }
244
245 if (i < ys->walk_root_level)
246 return false;
247
248 ni = &ys->node_infos[i];
249
250 /*
251 * The i'th node has been lost after a yield so trim it from the tree
252 * now.
253 */
254 lyd_free_tree(&ni->inner->node);
255 ni->inner = NULL;
256 ni->list_entry = NULL;
257
258 /*
259 * Leave the empty-of-data node_info on top, __walk will deal with
260 * this, by doing a lookup-next with the keys which we still have.
261 */
262
263 return true;
264}
265
266static void nb_op_resume_data_tree(struct nb_op_yield_state *ys)
267{
268 struct nb_op_node_info *ni;
269 struct nb_node *nn;
270 const void *parent_entry;
271 const void *list_entry;
272 uint i;
273
274 /*
275 * IMPORTANT: On yielding: we always yield during list iteration and
276 * after the initial list element has been created and handled, so the
277 * top of the yield stack will always point at a list node.
278 *
279 * Additionally, that list node has been processed and was in the
280 * process of being "get_next"d when we yielded. We process the
281 * lookup-next list node last so all the rest of the data (to the left)
282 * has been gotten. NOTE: To keep this simple we will require only a
283 * single lookup-next sibling in any parents list of children.
284 *
285 * Walk the rightmost branch (the node info stack) from base to tip
286 * verifying all list nodes are still present. If not we backup to the
287 * node which has a lookup next, and we prune the branch to this node.
288 * If the list node that went away is the topmost we will be using
289 * lookup_next, but if it's a parent then the list_entry will have been
290 * restored.
291 */
292 darr_foreach_i (ys->node_infos, i) {
293 ni = &ys->node_infos[i];
294 nn = ni->schema->priv;
295
296 if (CHECK_FLAG(ni->schema->nodetype, LYS_CONTAINER))
297 continue;
298
299 assert(ni->list_entry != NULL ||
300        ni == darr_last(ys->node_infos));
301
302 /* Verify the entry is still present */
303 parent_entry = (i == 0 ? NULL : ni[-1].list_entry);
304 list_entry = nb_callback_lookup_entry(nn, parent_entry,
305 &ni->keys);
306 if (!list_entry || list_entry != ni->list_entry) {
307 /* May be NULL or a different pointer
308 * move back to first of
309 * container with last lookup_next list node
310 * (which may be this one) and get next.
311 */
312 if (!__move_back_to_next(ys, i))
313 DEBUGD(&nb_dbg_events,
314        "%s: Nothing to resume after delete during walk (yield)",
315        __func__);
316 return;
317 }
318 }
319}
320
321/*
322 * Can only yield if all list nodes to root have lookup_next() callbacks
323 *
324 * In order to support lookup_next() the list_node get_next() callback
325 * needs to return ordered (i.e., sorted) results.
326 */
327
328/* ======================= */
329/* Start of walk init code */
330/* ======================= */
331
332/**
333 * __xpath_pop_node() - remove the last node from xpath string
334 * @xpath: an xpath string
335 *
336 * Return: NB_OK or NB_ERR_NOT_FOUND if nothing left to pop.
337 */
338static int __xpath_pop_node(char *xpath)
339{
340 int len = strlen(xpath);
341 bool abs = xpath[0] == '/';
342 char *slash;
343
344 /* "//" or "/" => NULL */
345 if (abs && (len == 1 || (len == 2 && xpath[1] == '/')))
346 return NB_ERR_NOT_FOUND;
347
348 slash = (char *)frrstr_back_to_char(xpath, '/');
349 /* "/foo/bar/" or "/foo/bar//" => "/foo " */
350 if (slash && slash == &xpath[len - 1]) {
351 xpath[--len] = 0;
352 slash = (char *)frrstr_back_to_char(xpath, '/');
353 if (slash && slash == &xpath[len - 1]) {
354 xpath[--len] = 0;
355 slash = (char *)frrstr_back_to_char(xpath, '/');
356 }
357 }
358 if (!slash)
359 return NB_ERR_NOT_FOUND;
360 *slash = 0;
361 return NB_OK;
362}
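    /* Editor-added worked example (hypothetical xpath, not part of this file):
     *
     *   char q[] = "/frr-vrf:lib/vrf[name='red']/state/";
     *
     *   __xpath_pop_node(q);   trailing '/' trimmed, last node dropped:
     *                          "/frr-vrf:lib/vrf[name='red']"
     *   __xpath_pop_node(q);   => "/frr-vrf:lib"
     */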
363
364/**
365 * nb_op_xpath_to_trunk() - generate a lyd_node tree (trunk) using an xpath.
366 * @xpath_in: xpath query string to build trunk from.
367 * @dnode: resulting tree (trunk)
368 *
369 * Use the longest prefix of @xpath_in as possible to resolve to a tree (trunk).
370 * This is logically as if we walked along the xpath string resolving each
371 * nodename reference (in particular list nodes) until we could not.
372 *
373 * Return: error if any, if no error then @dnode contains the tree (trunk).
374 */
375static enum nb_error nb_op_xpath_to_trunk(const char *xpath_in,
376 struct lyd_node **trunk)
377{
378 char *xpath = NULL;
379 enum nb_error ret = NB_OK;
380 LY_ERR err;
381
382 darr_in_strdup(xpath, xpath_in);
383 for (;;) {
384 err = lyd_new_path2(NULL, ly_native_ctx, xpath, NULL, 0, 0,
385                     LYD_NEW_PATH_UPDATE, NULL, trunk);
386 if (err == LY_SUCCESS)
387 break;
388
389 ret = __xpath_pop_node(xpath);
390 if (ret != NB_OK)
391 break;
392 }
393 darr_free(xpath);
394 return ret;
395}
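    /* Editor-added sketch of the longest-prefix behavior (hypothetical
     * query, not part of this file):
     *
     *   struct lyd_node *trunk = NULL;
     *
     *   nb_op_xpath_to_trunk("/frr-interface:lib/interface/state", &trunk);
     *
     * A list named without its key predicate cannot be instantiated, so
     * lyd_new_path2() fails; the loop then pops "state" and "interface",
     * and the first prefix that does instantiate -- here the "lib"
     * container -- is returned as the trunk.
     */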
396
397/*
398 * Finish initializing the node info based on the xpath string, and previous
399 * node_infos on the stack. If this node is a list node, obtain the specific
400 * list-entry object.
401 */
402static enum nb_error nb_op_ys_finalize_node_info(struct nb_op_yield_state *ys,
403 uint index)
404{
405 struct nb_op_node_info *ni = &ys->node_infos[index];
406 struct lyd_node_inner *inner = ni->inner;
407 struct nb_node *nn = ni->schema->priv;
408 bool yield_ok = ys->finish != NULL;
409
410 ni->has_lookup_next = nn->cbs.lookup_next != NULL;
411
412 /* track the last list_entry until updated by new list node */
413 ni->list_entry = index == 0 ? NULL : ni[-1].list_entry;
414
415 /* Assert that we are walking the rightmost branch */
416 assert(!inner->parent || &inner->node == inner->parent->child->prev);
417
418 if (CHECK_FLAG(inner->schema->nodetype, LYS_CONTAINER)) {
419 /* containers have only zero or one child on a branch of a tree */
420 inner = (struct lyd_node_inner *)inner->child;
421 assert(!inner || inner->prev == &inner->node);
422 ni->lookup_next_ok = yield_ok &&
423 (index == 0 || ni[-1].lookup_next_ok);
424 return NB_OK;
425 }
426
427 assert(CHECK_FLAG(inner->schema->nodetype, LYS_LIST));
428
429 ni->lookup_next_ok = yield_ok && ni->has_lookup_next &&
430 (index == 0 || ni[-1].lookup_next_ok);
431
432 nb_op_get_keys(inner, &ni->keys);
433
434 /* A list entry cannot be present in a tree w/o it's keys */
435 assert(ni->keys.num == yang_snode_num_keys(inner->schema));
436
437 /*
438 * Get this nodes opaque list_entry object
439 */
440
441 if (!nn->cbs.lookup_entry) {
442 flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
443           "%s: data path doesn't support iteration over operational data: %s",
444           __func__, ys->xpath);
445 return NB_ERR_NOT_FOUND;
446 }
447
448 /* ni->list_entry starts as the parent entry of this node */
449 ni->list_entry = nb_callback_lookup_entry(nn, ni->list_entry, &ni->keys);
450 if (ni->list_entry == NULL) {
451 flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
452           "%s: list entry lookup failed", __func__);
453 return NB_ERR_NOT_FOUND;
454 }
455
456 /*
457 * By definition any list element we can get a specific list_entry for
458 * is specific.
459 */
460 ni->query_specific_entry = true;
461
462 return NB_OK;
463}
464
465/**
466 * nb_op_ys_init_node_infos() - initialize the node info stack from the query.
467 * @ys: the yield state for this tree walk.
468 *
469 * On starting a walk we initialize the node_info stack as deeply as possible
470 * based on specific node references in the query string. We will stop at the
471 * point in the query string that is not specific (e.g., a list element without
472 * it's keys predicate)
473 *
474 * Return: northbound return value (enum nb_error)
475 */
476static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
477{
478 struct nb_op_node_info *ni;
479 struct lyd_node_inner *inner;
480 struct lyd_node *node;
481 enum nb_error ret;
482 uint i, len;
483 char *tmp;
484
485 /*
486 * Obtain the trunk of the data node tree of the query.
487 *
488 * These are the nodes from the root that could be specifically
489 * identified with the query string. The trunk ends when a no specific
490 * node could be identified (e.g., a list-node name with no keys).
491 */
492
493 ret = nb_op_xpath_to_trunk(ys->xpath, &node);
494 if (ret || !node) {
495 flog_warn(EC_LIB_LIBYANG,
496           "%s: can't instantiate concrete path using xpath: %s",
497           __func__, ys->xpath);
498 if (!ret)
499 ret = NB_ERR_NOT_FOUND;
500 return ret;
501 }
502 while (node &&
503        !CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST))
504 node = &node->parent->node;
505 if (!node)
506 return NB_ERR_NOT_FOUND;
507
508 inner = (struct lyd_node_inner *)node;
509 for (len = 1; inner->parent; len++)
510 inner = inner->parent;
511
512
513 darr_append_nz_mt(ys->node_infos, len, MTYPE_NB_NODE_INFOS);
514
515 /*
516 * For each node find the prefix of the xpath query that identified it
517 * -- save the prefix length.
518 */
519 inner = (struct lyd_node_inner *)node;
520 for (i = len; i > 0; i--, inner = inner->parent) {
521 ni = &ys->node_infos[i - 1];
522 ni->inner = inner;
523 ni->schema = inner->schema;
524 /*
525 * NOTE: we could build this by hand with a litte more effort,
526 * but this simple implementation works and won't be expensive
527 * since the number of nodes is small and only done once per
528 * query.
529 */
530 tmp = yang_dnode_get_path(&inner->node, NULL, 0);
531 ni->xpath_len = strlen(tmp);
532
533 /* Replace users supplied xpath with the libyang returned value */
534 if (i == len)
535 darr_in_strdup(ys->xpath, tmp);
536
537 /* The prefix must match the prefix of the stored xpath */
538 assert(!strncmp(tmp, ys->xpath, ni->xpath_len));
539 free(tmp);
540 }
541
542 /*
543 * Obtain the specific list-entry objects for each list node on the
544 * trunk and finish initializing the node_info structs.
545 */
546
547 darr_foreach_i (ys->node_infos, i) {
548 ret = nb_op_ys_finalize_node_info(ys, i);
549 if (ret != NB_OK) {
550 darr_free(ys->node_infos);
551 return ret;
552 }
553 }
554
555 ys->walk_start_level = darr_len(ys->node_infos);
556
557 ys->walk_root_level = (int)ys->walk_start_level - 1;
558
559 return NB_OK;
560}
561
562/* ================ */
563/* End of init code */
564/* ================ */
565
566/**
567 * nb_op_add_leaf() - Add leaf data to the get tree results
568 * @ys - the yield state for this tree walk.
569 * @nb_node - the northbound node representing this leaf.
570 * @xpath - the xpath (with key predicates) to this leaf value.
571 *
572 * Return: northbound return value (enum nb_error)
573 */
574static enum nb_error nb_op_iter_leaf(struct nb_op_yield_state *ys,
575 const struct nb_node *nb_node,
576 const char *xpath)
577{
578 const struct lysc_node *snode = nb_node->snode;
579 struct nb_op_node_info *ni = darr_last(ys->node_infos)({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len); (
(__len > 0) ? &(ys->node_infos)[__len - 1] : ((void
*)0)); })
;
580 struct yang_data *data;
581 enum nb_error ret = NB_OK;
582 LY_ERR err;
583
584 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)((snode->flags) & (0x01)))
585 return NB_OK;
586
587 /* Ignore list keys. */
588 if (lysc_is_key(snode)((!snode || (snode->nodetype != 0x0004) || !(snode->flags
& 0x0100)) ? 0 : 1)
)
589 return NB_OK;
590
591 data = nb_callback_get_elem(nb_node, xpath, ni->list_entry);
592 if (data == NULL((void*)0))
593 return NB_OK;
594
595 /* Add a dnode to our tree */
596 err = lyd_new_term(&ni->inner->node, snode->module, snode->name,
597 data->value, false0, NULL((void*)0));
598 if (err) {
599 yang_data_free(data);
600 return NB_ERR_RESOURCE;
601 }
602
603 if (ys->cb)
604 ret = (*ys->cb)(nb_node->snode, ys->translator, data,
605 ys->cb_arg);
606 yang_data_free(data);
607
608 return ret;
609}
610
611static enum nb_error nb_op_iter_leaflist(struct nb_op_yield_state *ys,
612 const struct nb_node *nb_node,
613 const char *xpath)
614{
615 const struct lysc_node *snode = nb_node->snode;
616 struct nb_op_node_info *ni = darr_last(ys->node_infos)({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len); (
(__len > 0) ? &(ys->node_infos)[__len - 1] : ((void
*)0)); })
;
617 const void *list_entry = NULL((void*)0);
618 enum nb_error ret = NB_OK;
619 LY_ERR err;
620
621 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)((snode->flags) & (0x01)))
622 return NB_OK;
623
624 do {
625 struct yang_data *data;
626
627 list_entry = nb_callback_get_next(nb_node, ni->list_entry,
628 list_entry);
629 if (!list_entry)
630 /* End of the list. */
631 break;
632
633 data = nb_callback_get_elem(nb_node, xpath, list_entry);
634 if (data == NULL((void*)0))
635 continue;
636
637 /* Add a dnode to our tree */
638 err = lyd_new_term(&ni->inner->node, snode->module, snode->name,
639 data->value, false0, NULL((void*)0));
640 if (err) {
641 yang_data_free(data);
642 return NB_ERR_RESOURCE;
643 }
644
645 if (ys->cb)
646 ret = (*ys->cb)(nb_node->snode, ys->translator, data,
647 ys->cb_arg);
648 yang_data_free(data);
649 } while (ret == NB_OK && list_entry);
650
651 return ret;
652}
653
654
655static bool_Bool nb_op_schema_path_has_predicate(struct nb_op_yield_state *ys,
656 int level)
657{
658 if (level > darr_lasti(ys->query_tokens)((((ys->query_tokens) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->query_tokens)) - 1)->len) - 1)
)
659 return false0;
660 return strchr(ys->query_tokens[level], '[') != NULL((void*)0);
661}
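/*
 * [Editor's note, not part of the original file] A sketch of the predicate
 * test above: a query token that carries a key (or position) predicate
 * contains a '[' character. The token strings below are hypothetical
 * examples, not taken from the original file:
 */
#if 0
	assert(strchr("interface[name='eth0']", '[') != NULL); /* has predicate */
	assert(strchr("interface", '[') == NULL);		/* plain node */
#endif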
662
663/**
664 * nb_op_empty_container_ok() - determine if we should keep an empty container node.
665 *
666 * Return: true if the empty container should be kept.
667 */
668static bool_Bool nb_op_empty_container_ok(const struct lysc_node *snode,
669 const char *xpath, const void *list_entry)
670{
671 struct nb_node *nn = snode->priv;
672 struct yang_data *data;
673
674 if (!CHECK_FLAG(snode->flags, LYS_PRESENCE)((snode->flags) & (0x80)))
675 return false0;
676
677 if (!nn->cbs.get_elem)
678 return false0;
679
680 data = nb_callback_get_elem(nn, xpath, list_entry);
681 if (data) {
682 yang_data_free(data);
683 return true1;
684 }
685 return false0;
686}
687
688/**
689 * nb_op_get_child_path() - add child node name to the xpath.
690 * @xpath_parent - a darr string for the parent node.
691 * @schild - the child schema node.
692 * @xpath_child - a previous return value from this function to reuse.
693 */
694static char *nb_op_get_child_path(const char *xpath_parent,
695 const struct lysc_node *schild,
696 char *xpath_child)
697{
698 /* "/childname" */
699 uint space, extra = strlen(schild->name) + 1;
700 bool_Bool new_mod = (!schild->parent ||
701 schild->parent->module != schild->module);
702 int n;
703
704 if (new_mod)
705 /* "modulename:" */
706 extra += strlen(schild->module->name) + 1;
707 space = darr_len(xpath_parent)(((xpath_parent) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(xpath_parent)) - 1)->len)
+ extra;
708
709 if (xpath_parent == xpath_child)
710 darr_ensure_cap(xpath_child, space)({ if ((ssize_t)(((xpath_child) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(xpath_child)) - 1)->cap) < (ssize_t)(
space)) ({ ((xpath_child)) = __darr_resize((xpath_child), (space
), sizeof(((xpath_child))[0]), MTYPE_DARR); }); (xpath_child)
; })
;
711 else
712 darr_in_strdup_cap(xpath_child, xpath_parent, space)({ size_t __size = strlen(xpath_parent) + 1; do { if ((xpath_child
)) (((struct darr_metadata *)(xpath_child)) - 1)->len = 0;
} while (0); ({ if ((ssize_t)(((xpath_child) == ((void*)0)) ?
0 : (((struct darr_metadata *)(xpath_child)) - 1)->cap) <
(ssize_t)(((size_t)(space) > __size) ? (size_t)(space) : __size
)) ({ ((xpath_child)) = __darr_resize((xpath_child), (((size_t
)(space) > __size) ? (size_t)(space) : __size), sizeof(((xpath_child
))[0]), MTYPE_DARR_STR); }); (xpath_child); }); strlcpy(xpath_child
, (xpath_parent), (((xpath_child) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(xpath_child)) - 1)->cap)); do { ({ static
const struct xref_assert _xref __attribute__( (used)) = { .xref
= { (((void*)0)), (XREFT_ASSERT), 712, "lib/northbound_oper.c"
, __func__, }, .expr = "((xpath_child)) || !((size_t)__size)"
, }; static const struct xref * const xref_p_470 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((((xpath_child)) || !((size_t)__size)) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while (((xpath_child)) || !((size_t
)__size)); }); if (((xpath_child))) { ({ static const struct xref_assert
_xref __attribute__( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT
), 712, "lib/northbound_oper.c", __func__, }, .expr = "(long long)darr_cap((xpath_child)) >= (long long)((size_t)__size)"
, }; static const struct xref * const xref_p_471 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
(((long long)((((xpath_child)) == ((void*)0)) ? 0 : (((struct
darr_metadata *)((xpath_child))) - 1)->cap) >= (long long
)((size_t)__size)) ? 0 : 1, 0)) do { _zlog_assert_failed(&
_xref, ((void*)0)); } while ((long long)((((xpath_child)) == (
(void*)0)) ? 0 : (((struct darr_metadata *)((xpath_child))) -
1)->cap) >= (long long)((size_t)__size)); }); (((struct
darr_metadata *)((xpath_child))) - 1)->len = ((size_t)__size
); } } while (0); xpath_child; })
;
713 if (new_mod)
714 n = snprintf(darr_strnul(xpath_child)({ uint __len = (((xpath_child) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(xpath_child)) - 1)->len); ((__len > 0
) ? &(xpath_child)[__len - 1] : ((void*)0)); })
, extra + 1, "/%s:%s",
715 schild->module->name, schild->name);
716 else
717 n = snprintf(darr_strnul(xpath_child)({ uint __len = (((xpath_child) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(xpath_child)) - 1)->len); ((__len > 0
) ? &(xpath_child)[__len - 1] : ((void*)0)); })
, extra + 1, "/%s",
718 schild->name);
719 assert(n == (int)extra)({ static const struct xref_assert _xref __attribute__( (used
)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 719, "lib/northbound_oper.c"
, __func__, }, .expr = "n == (int)extra", }; static const struct
xref * const xref_p_472 __attribute__((used, section("xref_array"
))) = &(_xref.xref); if (__builtin_expect((n == (int)extra
) ? 0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0
)); } while (n == (int)extra); })
;
720 _darr_len(xpath_child)(((struct darr_metadata *)(xpath_child)) - 1)->len += extra;
721 return xpath_child;
722}
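/*
 * [Editor's note, not part of the original file] The function above appends
 * either "/name" or, when the child lives in a different module than its
 * parent, "/module:name". A simplified sketch of that formatting with plain
 * snprintf (the real code builds into a darr string); the module and node
 * names here are hypothetical examples:
 */
#if 0
	char buf[64] = "/frr-interface:lib";
	size_t off = strlen(buf);

	/* same module as the parent: only the node name is appended */
	snprintf(buf + off, sizeof(buf) - off, "/%s", "interface");
	/* buf is now "/frr-interface:lib/interface" */
#endif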
723
724static bool_Bool __is_yielding_node(const struct lysc_node *snode)
725{
726 struct nb_node *nn = snode->priv;
727
728 return nn->cbs.lookup_next != NULL((void*)0);
729}
730
731static const struct lysc_node *__sib_next(bool_Bool yn, const struct lysc_node *sib)
732{
733 for (; sib; sib = sib->next) {
734 /* Always skip keys. */
735 if (lysc_is_key(sib)((!sib || (sib->nodetype != 0x0004) || !(sib->flags &
0x0100)) ? 0 : 1)
)
736 continue;
737 if (yn == __is_yielding_node(sib))
738 return sib;
739 }
740 return NULL((void*)0);
741}
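/*
 * [Editor's note, not part of the original file] __sib_next() returns the
 * first sibling, starting at @sib itself, whose "yielding" property matches
 * @yn (keys are always skipped). A sketch of visiting all non-yielding
 * siblings before the single yielding one, as the walk below does; visit() is
 * a hypothetical placeholder:
 */
#if 0
	const struct lysc_node *s;

	for (s = __sib_next(false, lysc_node_child(parent)); s;
	     s = __sib_next(false, s->next))
		visit(s);		/* non-yielding siblings first */
	s = __sib_next(true, lysc_node_child(parent));
	if (s)
		visit(s);		/* then the yielding one, if any */
#endif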
742
743/**
744 * nb_op_sib_next() - Return the next sibling to walk to
745 * @ys: the yield state for this tree walk.
746 * @sib: the sibling currently being visited
747 *
748 * Return: the next sibling to walk to, walking non-yielding before yielding.
749 */
750static const struct lysc_node *nb_op_sib_next(struct nb_op_yield_state *ys,
751 const struct lysc_node *sib)
752{
753 struct lysc_node *parent = sib->parent;
754 bool_Bool yn = __is_yielding_node(sib);
755
756 /*
757 * If the node info stack is shorter than the schema path then we are
758 * still doing a specific query on the node from the schema path (should
759 * match) so just return NULL.
760 */
761 if (darr_len(ys->schema_path)(((ys->schema_path) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->schema_path)) - 1)->len)
> darr_len(ys->node_infos)(((ys->node_infos) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->node_infos)) - 1)->len)
)
762 return NULL((void*)0);
763 /*
764 * If sib is on top of the node info stack then
765 * 1) it's a container node -or-
766 * 2) it's a list node that we were walking and we've reached the last entry
767 * 3) if sib is a list and the list was empty we never would have
768 * pushed sib on the stack so the top of the stack is the parent
769 *
770 * If the query string included this node then we do not process any
771 * siblings, as we are not walking all the parent's children, just this
772 * specified one given by the query string.
773 */
774 if (sib == darr_last(ys->node_infos)({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len); (
(__len > 0) ? &(ys->node_infos)[__len - 1] : ((void
*)0)); })
->schema &&
775 darr_len(ys->schema_path)(((ys->schema_path) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->schema_path)) - 1)->len)
>= darr_len(ys->node_infos)(((ys->node_infos) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->node_infos)) - 1)->len)
)
776 return NULL((void*)0);
777 /* case (3) */
778 else if (sib->nodetype == LYS_LIST0x0010 &&
779 parent == darr_last(ys->node_infos)({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len); (
(__len > 0) ? &(ys->node_infos)[__len - 1] : ((void
*)0)); })
->schema &&
780 darr_len(ys->schema_path)(((ys->schema_path) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->schema_path)) - 1)->len)
> darr_len(ys->node_infos)(((ys->node_infos) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->node_infos)) - 1)->len)
)
781 return NULL((void*)0);
782
783 sib = __sib_next(yn, sib->next);
784 if (sib)
785 return sib;
786 if (yn)
787 return NULL((void*)0);
788 return __sib_next(true1, lysc_node_child(parent));
789}
790/*
791 * sib_walk((struct lyd_node *)ni->inner->node.parent->parent->parent->parent->parent->parent->parent)
792 */
793
794/**
795 * nb_op_sib_first() - obtain the first child to walk to
796 * @ys: the yield state for this tree walk.
797 * @parent: the parent whose child we seek
798 * @skip_keys: whether to skip over keys
799 *
800 * Return: the first child to continue the walk to, starting with non-yielding
801 * siblings then yielding ones. There should be no more than 1 yielding sibling.
802 */
803static const struct lysc_node *nb_op_sib_first(struct nb_op_yield_state *ys,
804 const struct lysc_node *parent)
805{
806 const struct lysc_node *sib = lysc_node_child(parent);
807 const struct lysc_node *first_sib;
808
809 /*
810 * The top of the node stack points at @parent.
811 *
812 * If the schema path (original query) is longer than our current node
813 * info stack (current xpath location), we are building back up to the
814 * base of the user query; return the next schema node from the query
815 * string (schema_path).
816 */
817 assert(darr_last(ys->node_infos)->schema == parent)({ static const struct xref_assert _xref __attribute__( (used
)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 817, "lib/northbound_oper.c"
, __func__, }, .expr = "darr_last(ys->node_infos)->schema == parent"
, }; static const struct xref * const xref_p_473 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : (
((struct darr_metadata *)(ys->node_infos)) - 1)->len); (
(__len > 0) ? &(ys->node_infos)[__len - 1] : ((void
*)0)); })->schema == parent) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while (({ uint __len = (((ys->
node_infos) == ((void*)0)) ? 0 : (((struct darr_metadata *)(ys
->node_infos)) - 1)->len); ((__len > 0) ? &(ys->
node_infos)[__len - 1] : ((void*)0)); })->schema == parent
); })
;
818 if (darr_lasti(ys->node_infos)((((ys->node_infos) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->node_infos)) - 1)->len) - 1)
< ys->query_base_level)
819 return ys->schema_path[darr_lasti(ys->node_infos)((((ys->node_infos) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->node_infos)) - 1)->len) - 1)
+ 1];
820
821 /* We always skip keys. */
822 while (sib && lysc_is_key(sib)((!sib || (sib->nodetype != 0x0004) || !(sib->flags &
0x0100)) ? 0 : 1)
)
823 sib = sib->next;
824 if (!sib)
825 return NULL((void*)0);
826
827 /* Return non-yielding node's first */
828 first_sib = sib;
829 if (__is_yielding_node(sib)) {
830 sib = __sib_next(false0, sib);
831 if (sib)
832 return sib;
833 }
834 return first_sib;
835}
836
837/*
838 * "3-dimensional" walk from base of the tree to the tip in-order.
839 *
840 * The actual tree is only 2-dimensional as list nodes are organized as adjacent
841 * siblings under a common parent perhaps with other siblings to each side;
842 * however, using a 3d view here is easier to diagram.
843 *
844 * - A list node is yielding if it has a lookup_next callback.
845 * - All other node types are not yielding.
846 * - There's only one yielding node in a list of children (i.e., siblings).
847 *
848 * We visit all non-yielding children prior to the yielding child.
849 * That way we have the fullest tree possible even when something is deleted
850 * during a yield.
851 * --- child/parent descendant pointers
852 * ... next/prev sibling pointers
853 * o.o list entries pointers
854 * ~~~ diagram extension connector
855 * 1
856 * / \
857 * / \ o~~~~12
858 * / \ . / \
859 * 2.......5 o~~~9 13...14
860 * / \ | . / \
861 * 3...4 6 10...11 Cont Nodes: 1,2,5
862 * / \ List Nodes: 6,9,12
863 * 7...8 Leaf Nodes: 3,4,7,8,10,11,13,14
864 * Schema Leaf A: 3
865 * Schema Leaf B: 4
866 * Schema Leaf C: 7,10,13
867 * Schema Leaf D: 8,11,14
868 */
869static enum nb_error __walk(struct nb_op_yield_state *ys, bool_Bool is_resume)
870{
871 const struct lysc_node *walk_stem_tip = ys_get_walk_stem_tip(ys);
872 const struct lysc_node *sib;
873 const void *parent_list_entry = NULL((void*)0);
874 const void *list_entry = NULL((void*)0);
875 struct nb_op_node_info *ni, *pni;
876 struct lyd_node *node;
877 struct nb_node *nn;
878 char *xpath_child = NULL((void*)0);
879 // bool at_query_base;
880 bool_Bool at_root_level, list_start, is_specific_node;
881 enum nb_error ret = NB_OK;
882 LY_ERR err;
883 int at_clevel;
884 uint len;
885
886
887 monotime(&ys->start_time);
888
889 /* Don't currently support walking all root nodes */
890 if (!walk_stem_tip)
891 return NB_ERR_NOT_FOUND;
892
893 /*
894 * If we are resuming then start with the list container on top.
895 * Otherwise get the first child of the container we are walking,
896 * starting with non-yielding children.
897 */
898 if (is_resume)
899 sib = darr_last(ys->node_infos)({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len); (
(__len > 0) ? &(ys->node_infos)[__len - 1] : ((void
*)0)); })
->schema;
900 else {
901 /*
902 * Start with non-yielding children first.
903 *
904 * When adding root level walks, the sibling list consists of the root
905 * level nodes of all modules.
906 */
907 sib = nb_op_sib_first(ys, walk_stem_tip);
908 if (!sib)
909 return NB_ERR_NOT_FOUND;
910 }
911
912
913 while (true1) {
914 /* Grab the top container/list node info on the stack */
915 at_clevel = darr_lasti(ys->node_infos)((((ys->node_infos) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->node_infos)) - 1)->len) - 1)
;
916 ni = &ys->node_infos[at_clevel];
917
918 /*
919 * This is the level of the last specific node at init
920 * time. +1 would be the first non-specific list or
921 * non-container if present in the container node.
922 */
923 at_root_level = at_clevel == ys->walk_root_level;
924
925 if (!sib) {
926 /*
927 * We've reached the end of the siblings inside a
928 * containing node; either a container or a specific
929 * list node entry.
930 *
931 * We handle container node inline; however, for lists
932 * we are only done with a specific entry and need to
933 * move to the next element on the list so we drop down
934 * into the switch for that case.
935 */
936
937 /* Grab the containing node. */
938 sib = ni->schema;
939
940 if (sib->nodetype == LYS_CONTAINER0x0001) {
941 /* If we added an empty container node (no
942 * children) and it's not a presence container
943 * or it's not backed by the get_elem callback,
944 * remove the node from the tree.
945 */
946 if (!lyd_child(&ni->inner->node) &&
947 !nb_op_empty_container_ok(sib, ys->xpath,
948 ni->list_entry))
949 lyd_free_tree(&ni->inner->node);
950
951 /* If we have returned to our original walk base,
952 * then we are done with the walk.
953 */
954 if (at_root_level) {
955 ret = NB_OK;
956 goto done;
957 }
958 /*
959 * Grab the sibling of the container we are
960 * about to pop, so we will be mid-walk on the
961 * parent containers children.
962 */
963 sib = nb_op_sib_next(ys, sib);
964
965 /* Pop container node to the parent container */
966 ys_pop_inner(ys);
967
968 /*
969 * If we were working on a user-narrowed path
970 * then we are done with these siblings.
971 */
972 if (darr_len(ys->schema_path)(((ys->schema_path) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->schema_path)) - 1)->len)
>
973 darr_len(ys->node_infos)(((ys->node_infos) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->node_infos)) - 1)->len)
)
974 sib = NULL((void*)0);
975
976 /* Start over */
977 continue;
978 }
979 /*
980 * If we are here we have reached the end of the
981 * children of a list entry node. sib points
982 * at the list node info.
983 */
984 }
985
986 /* TODO: old code checked for "first" here and skipped if set */
987 if (CHECK_FLAG(sib->nodetype,((sib->nodetype) & (0x0004 | 0x0008 | 0x0001))
988 LYS_LEAF | LYS_LEAFLIST | LYS_CONTAINER)((sib->nodetype) & (0x0004 | 0x0008 | 0x0001)))
989 xpath_child = nb_op_get_child_path(ys->xpath, sib,
990 xpath_child);
991 nn = sib->priv;
992
993 switch (sib->nodetype) {
994 case LYS_LEAF0x0004:
995 /*
996 * If we have a non-specific walk to a specific leaf
997 * (e.g., "..../route-entry/metric") and the leaf value
998 * is not present, then we are left with the data nodes
999 * of the stem of the branch to the missing leaf data.
1000 * For containers this will get cleaned up by the
1001 * container code above that looks for no children;
1002 * however, this doesn't work for lists.
1003 *
1004 * (FN:A) We need a similar check for empty list
1005 * elements. Empty list elements below the
1006 * query_base_level (i.e., the schema path length)
1007 * should be cleaned up as they don't support anything
1008 * the user is querying for, if they are above the
1009 * the user is querying for; if they are above the
1010 * should be kept.
1011 */
1012 ret = nb_op_iter_leaf(ys, nn, xpath_child);
1013 sib = nb_op_sib_next(ys, sib);
1014 continue;
1015 case LYS_LEAFLIST0x0008:
1016 ret = nb_op_iter_leaflist(ys, nn, xpath_child);
1017 sib = nb_op_sib_next(ys, sib);
1018 continue;
1019 case LYS_CONTAINER0x0001:
1020 if (CHECK_FLAG(nn->flags, F_NB_NODE_CONFIG_ONLY)((nn->flags) & (0x01))) {
1021 sib = nb_op_sib_next(ys, sib);
1022 continue;
1023 }
1024
1025 node = NULL((void*)0);
1026 err = lyd_new_inner(&ni->inner->node, sib->module,
1027 sib->name, false0, &node);
1028 if (err) {
1029 ret = NB_ERR_RESOURCE;
1030 goto done;
1031 }
1032
1033 /* push this container node on top of the stack */
1034 ni = darr_appendz(ys->node_infos)({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len); (
{ if ((ssize_t)(((ys->node_infos) == ((void*)0)) ? 0 : (((
struct darr_metadata *)(ys->node_infos)) - 1)->cap) <
(ssize_t)(__len + (1))) ({ ((ys->node_infos)) = __darr_resize
((ys->node_infos), (__len + (1)), sizeof(((ys->node_infos
))[0]), MTYPE_DARR); }); (ys->node_infos); }); (((struct darr_metadata
*)(ys->node_infos)) - 1)->len = __len + (1); if (1) memset
(&(ys->node_infos)[__len], 0, (1)*sizeof((ys->node_infos
)[0])); &(ys->node_infos)[__len]; })
;
1035 ni->inner = (struct lyd_node_inner *)node;
1036 ni->schema = node->schema;
1037 ni->niters = 0;
1038 ni->nents = 0;
1039 ni->has_lookup_next = false0;
1040 ni->lookup_next_ok = ni[-1].lookup_next_ok;
1041 ni->list_entry = ni[-1].list_entry;
1042
1043 darr_in_strdup(ys->xpath, xpath_child)({ size_t __size = strlen(xpath_child) + 1; do { if ((ys->
xpath)) (((struct darr_metadata *)(ys->xpath)) - 1)->len
= 0; } while (0); ({ if ((ssize_t)(((ys->xpath) == ((void
*)0)) ? 0 : (((struct darr_metadata *)(ys->xpath)) - 1)->
cap) < (ssize_t)(((size_t)(1) > __size) ? (size_t)(1) :
__size)) ({ ((ys->xpath)) = __darr_resize((ys->xpath),
(((size_t)(1) > __size) ? (size_t)(1) : __size), sizeof((
(ys->xpath))[0]), MTYPE_DARR_STR); }); (ys->xpath); });
strlcpy(ys->xpath, (xpath_child), (((ys->xpath) == ((void
*)0)) ? 0 : (((struct darr_metadata *)(ys->xpath)) - 1)->
cap)); do { ({ static const struct xref_assert _xref __attribute__
( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1043, "lib/northbound_oper.c"
, __func__, }, .expr = "((ys->xpath)) || !((size_t)__size)"
, }; static const struct xref * const xref_p_474 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((((ys->xpath)) || !((size_t)__size)) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while (((ys->xpath)) || !((size_t
)__size)); }); if (((ys->xpath))) { ({ static const struct
xref_assert _xref __attribute__( (used)) = { .xref = { (((void
*)0)), (XREFT_ASSERT), 1043, "lib/northbound_oper.c", __func__
, }, .expr = "(long long)darr_cap((ys->xpath)) >= (long long)((size_t)__size)"
, }; static const struct xref * const xref_p_475 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
(((long long)((((ys->xpath)) == ((void*)0)) ? 0 : (((struct
darr_metadata *)((ys->xpath))) - 1)->cap) >= (long long
)((size_t)__size)) ? 0 : 1, 0)) do { _zlog_assert_failed(&
_xref, ((void*)0)); } while ((long long)((((ys->xpath)) ==
((void*)0)) ? 0 : (((struct darr_metadata *)((ys->xpath))
) - 1)->cap) >= (long long)((size_t)__size)); }); (((struct
darr_metadata *)((ys->xpath))) - 1)->len = ((size_t)__size
); } } while (0); ys->xpath; })
;
1044 ni->xpath_len = darr_strlen(ys->xpath)({ uint __size = (((ys->xpath) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(ys->xpath)) - 1)->len); if (__size) __size
-= 1; ({ static const struct xref_assert _xref __attribute__
( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1044, "lib/northbound_oper.c"
, __func__, }, .expr = "!(ys->xpath) || ((char *)(ys->xpath))[__size] == 0"
, }; static const struct xref * const xref_p_476 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((!(ys->xpath) || ((char *)(ys->xpath))[__size] == 0) ?
0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0));
} while (!(ys->xpath) || ((char *)(ys->xpath))[__size]
== 0); }); __size; })
;
1045
1046 sib = nb_op_sib_first(ys, sib);
1047 continue;
1048 case LYS_LIST0x0010:
1049
1050 /*
1051 * Notes:
1052 *
1053 * NOTE: ni->inner may be NULL here if we resumed and it
1054 * was gone. ni->schema and ni->keys will still be
1055 * valid.
1056 *
1057 * NOTE: At this point sib is never NULL; however, if it
1058 * was NULL at the top of the loop, then we were done
1059 * working on a list element's children and will be
1060 * attempting to get the next list element here so sib
1061 * == ni->schema (i.e., !list_start).
1062 *
1063 * (FN:A): Before doing this let's remove empty list
1064 * elements that are "inside" the query string as they
1065 * represent a stem which didn't lead to actual data
1066 * being requested by the user -- for example,
1067 * ".../route-entry/metric" if metric is not present we
1068 * don't want to return an empty route-entry to the
1069 * user.
1070 */
1071
1072 node = NULL((void*)0);
1073 list_start = ni->schema != sib;
1074 if (list_start) {
1075 /*
1076 * List iteration: First Element
1077 * -----------------------------
1078 *
1079 * Our node info wasn't on top (wasn't an entry
1080 * for sib) so this is a new list iteration, we
1081 * will push our node info below. The top is our
1082 * parent.
1083 */
1084 if (CHECK_FLAG(nn->flags,((nn->flags) & (0x01))
1085 F_NB_NODE_CONFIG_ONLY)((nn->flags) & (0x01))) {
1086 sib = nb_op_sib_next(ys, sib);
1087 continue;
1088 }
1089 /* we are now at one level higher */
1090 at_clevel += 1;
1091 pni = ni;
1092 ni = NULL((void*)0);
1093 } else {
1094 /*
1095 * List iteration: Next Element
1096 * ----------------------------
1097 *
1098 * This is the case where `sib == NULL` at the
1099 * top of the loop, so we just completed
1100 * walking the children of a list entry, i.e.,
1101 * we are done with that list entry.
1102 *
1103 * `sib` was reset to point at our list node
1104 * at the top of node_infos.
1105 *
1106 * Within this node_info, `ys->xpath`, `inner`,
1107 * `list_entry`, and `xpath_len` are for the
1108 * previous list entry, and need to be updated.
1109 */
1110 pni = darr_len(ys->node_infos)(((ys->node_infos) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->node_infos)) - 1)->len)
> 1 ? &ni[-1]
1111 : NULL((void*)0);
1112 }
1113
1114 parent_list_entry = pni ? pni->list_entry : NULL((void*)0);
1115 list_entry = ni ? ni->list_entry : NULL((void*)0);
1116
1117 /*
1118 * Before yielding we check to see if we are doing a
1119 * specific list entry instead of a full list iteration.
1120 * We do not want to yield during specific list entry
1121 * processing.
1122 */
1123
1124 /*
1125 * If we are at a list start, check to see if the node
1126 * has a predicate. If so we will try to fetch the data
1127 * node now that we've built part of the tree; if the
1128 * predicates are keys or only depend on the tree already
1129 * built, it should create the element for us.
1130 */
1131 is_specific_node = false0;
1132 if (list_start &&
1133 at_clevel <= darr_lasti(ys->query_tokens)((((ys->query_tokens) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->query_tokens)) - 1)->len) - 1)
&&
1134 nb_op_schema_path_has_predicate(ys, at_clevel)) {
1135 err = lyd_new_path(&pni->inner->node, NULL((void*)0),
1136 ys->query_tokens[at_clevel],
1137 NULL((void*)0), 0, &node);
1138 if (!err)
1139 /* predicate resolved to specific node */
1140 is_specific_node = true1;
1141 else {
1142 flog_warn(EC_LIB_NB_OPERATIONAL_DATA,do { static struct xrefdata_logmsg _xrefdata = { .xrefdata = {
.xref = ((void*)0), .uid = {}, .hashstr = ("%s: unable to create node for specific query string: %s"
), .hashu32 = {(4), (EC_LIB_NB_OPERATIONAL_DATA)}, }, }; static
const struct xref_logmsg _xref __attribute__( (used)) = { .xref
= { (&_xrefdata.xrefdata), (XREFT_LOGMSG), 1145, "lib/northbound_oper.c"
, __func__, }, .fmtstring = ("%s: unable to create node for specific query string: %s"
), .priority = (4), .ec = (EC_LIB_NB_OPERATIONAL_DATA), .args
= ("__func__, ys->query_tokens[at_clevel]"), }; static const
struct xref * const xref_p_477 __attribute__((used, section(
"xref_array"))) = &(_xref.xref); zlog_ref(&_xref, ("%s: unable to create node for specific query string: %s"
), __func__, ys->query_tokens[at_clevel]); } while (0)
1143 "%s: unable to create node for specific query string: %s",do { static struct xrefdata_logmsg _xrefdata = { .xrefdata = {
.xref = ((void*)0), .uid = {}, .hashstr = ("%s: unable to create node for specific query string: %s"
), .hashu32 = {(4), (EC_LIB_NB_OPERATIONAL_DATA)}, }, }; static
const struct xref_logmsg _xref __attribute__( (used)) = { .xref
= { (&_xrefdata.xrefdata), (XREFT_LOGMSG), 1145, "lib/northbound_oper.c"
, __func__, }, .fmtstring = ("%s: unable to create node for specific query string: %s"
), .priority = (4), .ec = (EC_LIB_NB_OPERATIONAL_DATA), .args
= ("__func__, ys->query_tokens[at_clevel]"), }; static const
struct xref * const xref_p_477 __attribute__((used, section(
"xref_array"))) = &(_xref.xref); zlog_ref(&_xref, ("%s: unable to create node for specific query string: %s"
), __func__, ys->query_tokens[at_clevel]); } while (0)
1144 __func__,do { static struct xrefdata_logmsg _xrefdata = { .xrefdata = {
.xref = ((void*)0), .uid = {}, .hashstr = ("%s: unable to create node for specific query string: %s"
), .hashu32 = {(4), (EC_LIB_NB_OPERATIONAL_DATA)}, }, }; static
const struct xref_logmsg _xref __attribute__( (used)) = { .xref
= { (&_xrefdata.xrefdata), (XREFT_LOGMSG), 1145, "lib/northbound_oper.c"
, __func__, }, .fmtstring = ("%s: unable to create node for specific query string: %s"
), .priority = (4), .ec = (EC_LIB_NB_OPERATIONAL_DATA), .args
= ("__func__, ys->query_tokens[at_clevel]"), }; static const
struct xref * const xref_p_477 __attribute__((used, section(
"xref_array"))) = &(_xref.xref); zlog_ref(&_xref, ("%s: unable to create node for specific query string: %s"
), __func__, ys->query_tokens[at_clevel]); } while (0)
1145 ys->query_tokens[at_clevel])do { static struct xrefdata_logmsg _xrefdata = { .xrefdata = {
.xref = ((void*)0), .uid = {}, .hashstr = ("%s: unable to create node for specific query string: %s"
), .hashu32 = {(4), (EC_LIB_NB_OPERATIONAL_DATA)}, }, }; static
const struct xref_logmsg _xref __attribute__( (used)) = { .xref
= { (&_xrefdata.xrefdata), (XREFT_LOGMSG), 1145, "lib/northbound_oper.c"
, __func__, }, .fmtstring = ("%s: unable to create node for specific query string: %s"
), .priority = (4), .ec = (EC_LIB_NB_OPERATIONAL_DATA), .args
= ("__func__, ys->query_tokens[at_clevel]"), }; static const
struct xref * const xref_p_477 __attribute__((used, section(
"xref_array"))) = &(_xref.xref); zlog_ref(&_xref, ("%s: unable to create node for specific query string: %s"
), __func__, ys->query_tokens[at_clevel]); } while (0)
;
1146 }
1147 }
1148
1149 if (list_entry && ni->query_specific_entry) {
1150 /*
1151 * Ending specific list entry processing.
1152 */
1153 assert(!list_start)({ static const struct xref_assert _xref __attribute__( (used
)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1153, "lib/northbound_oper.c"
, __func__, }, .expr = "!list_start", }; static const struct xref
* const xref_p_478 __attribute__((used, section("xref_array"
))) = &(_xref.xref); if (__builtin_expect((!list_start) ?
0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0));
} while (!list_start); })
;
1154 is_specific_node = true1;
1155 list_entry = NULL((void*)0);
1156 }
1157
1158 /*
1159 * Should we yield?
1160 *
1161 * Don't yield if we have a specific entry.
1162 */
1163 if (!is_specific_node && ni && ni->lookup_next_ok &&
1164 // make sure we advance, if the interval is
1165 // fast and we are very slow.
1166 ((monotime_since(&ys->start_time, NULL((void*)0)) >
1167 NB_OP_WALK_INTERVAL_US(50 * 1000) &&
1168 ni->niters) ||
1169 (ni->niters + 1) % 10000 == 0)) {
1170 /* This is a yield supporting list node and
1171 * we've been running at least our yield
1172 * interval, so yield.
1173 *
1174 * NOTE: we never yield on list_start, and we
1175 * are always about to be doing a get_next.
1176 */
1177 DEBUGD(&nb_dbg_events,do { if (((__c11_atomic_load(&(&nb_dbg_events)->flags
, memory_order_seq_cst)) & (((0x01000000 | 0x02000000))&
(0x01000000 | 0x02000000)))) do { static struct xrefdata_logmsg
_xrefdata = { .xrefdata = { .xref = ((void*)0), .uid = {}, .
hashstr = ("%s: yielding after %u iterations"), .hashu32 = {(
7), (0)}, }, }; static const struct xref_logmsg _xref __attribute__
( (used)) = { .xref = { (&_xrefdata.xrefdata), (XREFT_LOGMSG
), 1179, "lib/northbound_oper.c", __func__, }, .fmtstring = (
"%s: yielding after %u iterations"), .priority = (7), .ec = (
0), .args = ("__func__, ni->niters"), }; static const struct
xref * const xref_p_479 __attribute__((used, section("xref_array"
))) = &(_xref.xref); zlog_ref(&_xref, ("%s: yielding after %u iterations"
), __func__, ni->niters); } while (0); } while (0)
1178 "%s: yielding after %u iterations",do { if (((__c11_atomic_load(&(&nb_dbg_events)->flags
, memory_order_seq_cst)) & (((0x01000000 | 0x02000000))&
(0x01000000 | 0x02000000)))) do { static struct xrefdata_logmsg
_xrefdata = { .xrefdata = { .xref = ((void*)0), .uid = {}, .
hashstr = ("%s: yielding after %u iterations"), .hashu32 = {(
7), (0)}, }, }; static const struct xref_logmsg _xref __attribute__
( (used)) = { .xref = { (&_xrefdata.xrefdata), (XREFT_LOGMSG
), 1179, "lib/northbound_oper.c", __func__, }, .fmtstring = (
"%s: yielding after %u iterations"), .priority = (7), .ec = (
0), .args = ("__func__, ni->niters"), }; static const struct
xref * const xref_p_479 __attribute__((used, section("xref_array"
))) = &(_xref.xref); zlog_ref(&_xref, ("%s: yielding after %u iterations"
), __func__, ni->niters); } while (0); } while (0)
1179 __func__, ni->niters)do { if (((__c11_atomic_load(&(&nb_dbg_events)->flags
, memory_order_seq_cst)) & (((0x01000000 | 0x02000000))&
(0x01000000 | 0x02000000)))) do { static struct xrefdata_logmsg
_xrefdata = { .xrefdata = { .xref = ((void*)0), .uid = {}, .
hashstr = ("%s: yielding after %u iterations"), .hashu32 = {(
7), (0)}, }, }; static const struct xref_logmsg _xref __attribute__
( (used)) = { .xref = { (&_xrefdata.xrefdata), (XREFT_LOGMSG
), 1179, "lib/northbound_oper.c", __func__, }, .fmtstring = (
"%s: yielding after %u iterations"), .priority = (7), .ec = (
0), .args = ("__func__, ni->niters"), }; static const struct
xref * const xref_p_479 __attribute__((used, section("xref_array"
))) = &(_xref.xref); zlog_ref(&_xref, ("%s: yielding after %u iterations"
), __func__, ni->niters); } while (0); } while (0)
;
1180
1181 ni->niters = 0;
1182 ret = NB_YIELD;
1183 goto done;
1184 }
1185
1186 /*
1187 * Now get the backend list_entry opaque object for
1188 * this list entry from the backend.
1189 */
1190
1191 if (is_specific_node) {
1192 /*
1193 * Specific List Entry:
1194 * --------------------
1195 */
1196 if (list_start) {
1197 list_entry =
1198 nb_callback_lookup_node_entry(
1199 node, parent_list_entry);
1200 /*
1201 * If the node we created from a
1202 * specific predicate entry is not
1203 * actually there we need to delete the
1204 * node from our data tree
1205 */
1206 if (!list_entry) {
1207 lyd_free_tree(node);
1208 node = NULL((void*)0);
1209 }
1210 }
1211 } else if (!list_start && !list_entry &&
1212 ni->has_lookup_next) {
1213 /*
1214 * After Yield:
1215 * ------------
1216 * After a yield the list_entry may have become
1217 * invalid, so use lookup_next callback with
1218 * parent and keys instead to find next element.
1219 */
1220 list_entry =
1221 nb_callback_lookup_next(nn,
1222 parent_list_entry,
1223 &ni->keys);
1224 } else {
1225 /*
1226 * Normal List Iteration:
1227 * ----------------------
1228 * Start (list_entry == NULL) or continue
1229 * (list_entry != NULL) the list iteration.
1230 */
1231 /* Obtain [next] list entry. */
1232 list_entry =
1233 nb_callback_get_next(nn,
1234 parent_list_entry,
1235 list_entry);
1236 }
1237
1238 /*
1239 * (FN:A) Reap empty list element? Check to see if we
1240 * should reap an empty list element. We do this if the
1241 * empty list element exists at or below the query base
1242 * (i.e., it's not part of the walk, but a failed find
1243 * on a more specific query, e.g., below the
1244 * `route-entry` element for a query
1245 * `.../route-entry/metric` where the list element had
1246 * no metric value).
1247 *
1248 * However, if the user query is for a key of a list
1249 * element, then when we reach that list element it will
1250 * have no non-key children, check for this condition
1251 * and do not reap if true.
1252 */
1253 if (!list_start && ni->inner &&
1254 !lyd_child_no_keys(&ni->inner->node) &&
1255 /* not the top element with a key match */
1256 !((darr_ilen(ys->node_infos)(((ys->node_infos) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->node_infos)) - 1)->len)
==
1257 darr_ilen(ys->schema_path)(((ys->schema_path) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->schema_path)) - 1)->len)
- 1) &&
1258 lysc_is_key((*darr_last(ys->schema_path)))((!(*({ uint __len = (((ys->schema_path) == ((void*)0)) ? 0
: (((struct darr_metadata *)(ys->schema_path)) - 1)->len
); ((__len > 0) ? &(ys->schema_path)[__len - 1] : (
(void*)0)); })) || ((*({ uint __len = (((ys->schema_path) ==
((void*)0)) ? 0 : (((struct darr_metadata *)(ys->schema_path
)) - 1)->len); ((__len > 0) ? &(ys->schema_path)
[__len - 1] : ((void*)0)); }))->nodetype != 0x0004) || !((
*({ uint __len = (((ys->schema_path) == ((void*)0)) ? 0 : (
((struct darr_metadata *)(ys->schema_path)) - 1)->len);
((__len > 0) ? &(ys->schema_path)[__len - 1] : ((void
*)0)); }))->flags & 0x0100)) ? 0 : 1)
) &&
1259 /* is this at or below the base? */
1260 darr_ilen(ys->node_infos)(((ys->node_infos) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->node_infos)) - 1)->len)
<= ys->query_base_level)
1261 lyd_free_tree(&ni->inner->node);
1262
1263
1264 if (!list_entry) {
1265 /*
1266 * List Iteration Done
1267 * -------------------
1268 */
1269
1270 /*
1271 * Grab next sibling of the list node
1272 */
1273 if (is_specific_node)
1274 sib = NULL((void*)0);
1275 else
1276 sib = nb_op_sib_next(ys, sib);
1277
1278 /*
1279 * If we are at the walk root (base) level then
1280 * that specifies a list and we are done iterating
1281 * the list, so we are done with the walk entirely.
1282 */
1283 if (!sib && at_clevel == ys->walk_root_level) {
1284 ret = NB_OK;
1285 goto done;
1286 }
1287
1288 /*
1289 * Pop our list node info back to our
1290 * parent.
1291 *
1292 * We only do this if we've already pushed a
1293 * node for the current list schema. For
1294 * `list_start` this hasn't happened yet, as it
1295 * would have happened below. So when list_start
1296 * is true but list_entry is NULL we
1297 * are processing an empty list.
1298 */
1299 if (!list_start)
1300 ys_pop_inner(ys);
1301
1302 /*
1303 * We should never be below the walk root
1304 */
1305 assert(darr_lasti(ys->node_infos) >=({ static const struct xref_assert _xref __attribute__( (used
)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1306, "lib/northbound_oper.c"
, __func__, }, .expr = "darr_lasti(ys->node_infos) >= ys->walk_root_level"
, }; static const struct xref * const xref_p_480 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((((((ys->node_infos) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->node_infos)) - 1)->len) - 1) >=
ys->walk_root_level) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while (((((ys->node_infos) == (
(void*)0)) ? 0 : (ssize_t)(((struct darr_metadata *)(ys->node_infos
)) - 1)->len) - 1) >= ys->walk_root_level); })
1306 ys->walk_root_level)({ static const struct xref_assert _xref __attribute__( (used
)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1306, "lib/northbound_oper.c"
, __func__, }, .expr = "darr_lasti(ys->node_infos) >= ys->walk_root_level"
, }; static const struct xref * const xref_p_480 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((((((ys->node_infos) == ((void*)0)) ? 0 : (ssize_t)(((struct
darr_metadata *)(ys->node_infos)) - 1)->len) - 1) >=
ys->walk_root_level) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while (((((ys->node_infos) == (
(void*)0)) ? 0 : (ssize_t)(((struct darr_metadata *)(ys->node_infos
)) - 1)->len) - 1) >= ys->walk_root_level); })
;
1307
1308 /* Move on to the sibling of the list node */
1309 continue;
1310 }
1311
1312 /*
1313 * From here on, we have selected a new top node_info
1314 * list entry (either newly pushed or replacing the
1315 * previous entry in the walk), and we are filling in
1316 * the details.
1317 */
1318
1319 if (list_start) {
1320 /*
1321 * Starting iteration of a list type or
1322 * processing a specific entry, push the list
1323 * node_info on stack.
1324 */
1325 ni = darr_appendz(ys->node_infos)({ uint __len = (((ys->node_infos) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len); (
{ if ((ssize_t)(((ys->node_infos) == ((void*)0)) ? 0 : (((
struct darr_metadata *)(ys->node_infos)) - 1)->cap) <
(ssize_t)(__len + (1))) ({ ((ys->node_infos)) = __darr_resize
((ys->node_infos), (__len + (1)), sizeof(((ys->node_infos
))[0]), MTYPE_DARR); }); (ys->node_infos); }); (((struct darr_metadata
*)(ys->node_infos)) - 1)->len = __len + (1); if (1) memset
(&(ys->node_infos)[__len], 0, (1)*sizeof((ys->node_infos
)[0])); &(ys->node_infos)[__len]; })
;
1326 pni = &ni[-1]; /* memory may have moved */
1327 ni->has_lookup_next = nn->cbs.lookup_next !=
1328 NULL((void*)0);
1329 ni->lookup_next_ok = ((!pni && ys->finish) ||
1330 pni->lookup_next_ok) &&
1331 ni->has_lookup_next;
1332 ni->query_specific_entry = is_specific_node;
1333 ni->niters = 0;
1334 ni->nents = 0;
1335
1336 /* this will be our predicate-less xpath */
1337 ys->xpath = nb_op_get_child_path(ys->xpath, sib,
1338 ys->xpath);
1339 } else {
1340 /*
1341 * Reset our xpath to the list node (i.e.,
1342 * remove the entry predicates)
1343 */
1344 if (ni->query_specific_entry) {
1345 flog_warn(EC_LIB_NB_OPERATIONAL_DATA,do { static struct xrefdata_logmsg _xrefdata = { .xrefdata = {
.xref = ((void*)0), .uid = {}, .hashstr = ("%s: unexpected state"
), .hashu32 = {(4), (EC_LIB_NB_OPERATIONAL_DATA)}, }, }; static
const struct xref_logmsg _xref __attribute__( (used)) = { .xref
= { (&_xrefdata.xrefdata), (XREFT_LOGMSG), 1347, "lib/northbound_oper.c"
, __func__, }, .fmtstring = ("%s: unexpected state"), .priority
= (4), .ec = (EC_LIB_NB_OPERATIONAL_DATA), .args = ("__func__"
), }; static const struct xref * const xref_p_481 __attribute__
((used, section("xref_array"))) = &(_xref.xref); zlog_ref
(&_xref, ("%s: unexpected state"), __func__); } while (0)
1346 "%s: unexpected state",do { static struct xrefdata_logmsg _xrefdata = { .xrefdata = {
.xref = ((void*)0), .uid = {}, .hashstr = ("%s: unexpected state"
), .hashu32 = {(4), (EC_LIB_NB_OPERATIONAL_DATA)}, }, }; static
const struct xref_logmsg _xref __attribute__( (used)) = { .xref
= { (&_xrefdata.xrefdata), (XREFT_LOGMSG), 1347, "lib/northbound_oper.c"
, __func__, }, .fmtstring = ("%s: unexpected state"), .priority
= (4), .ec = (EC_LIB_NB_OPERATIONAL_DATA), .args = ("__func__"
), }; static const struct xref * const xref_p_481 __attribute__
((used, section("xref_array"))) = &(_xref.xref); zlog_ref
(&_xref, ("%s: unexpected state"), __func__); } while (0)
1347 __func__)do { static struct xrefdata_logmsg _xrefdata = { .xrefdata = {
.xref = ((void*)0), .uid = {}, .hashstr = ("%s: unexpected state"
), .hashu32 = {(4), (EC_LIB_NB_OPERATIONAL_DATA)}, }, }; static
const struct xref_logmsg _xref __attribute__( (used)) = { .xref
= { (&_xrefdata.xrefdata), (XREFT_LOGMSG), 1347, "lib/northbound_oper.c"
, __func__, }, .fmtstring = ("%s: unexpected state"), .priority
= (4), .ec = (EC_LIB_NB_OPERATIONAL_DATA), .args = ("__func__"
), }; static const struct xref * const xref_p_481 __attribute__
((used, section("xref_array"))) = &(_xref.xref); zlog_ref
(&_xref, ("%s: unexpected state"), __func__); } while (0)
;
1348 }
1349 assert(!ni->query_specific_entry)({ static const struct xref_assert _xref __attribute__( (used
)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1349, "lib/northbound_oper.c"
, __func__, }, .expr = "!ni->query_specific_entry", }; static
const struct xref * const xref_p_482 __attribute__((used, section
("xref_array"))) = &(_xref.xref); if (__builtin_expect((!
ni->query_specific_entry) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while (!ni->query_specific_entry
); })
;
1350 len = strlen(sib->name) + 1; /* "/sibname" */
1351 if (pni)
1352 len += pni->xpath_len;
1353 darr_setlen(ys->xpath, len + 1)do { ({ static const struct xref_assert _xref __attribute__( (
used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1353, "lib/northbound_oper.c"
, __func__, }, .expr = "(ys->xpath) || !(len + 1)", }; static
const struct xref * const xref_p_483 __attribute__((used, section
("xref_array"))) = &(_xref.xref); if (__builtin_expect(((
ys->xpath) || !(len + 1)) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while ((ys->xpath) || !(len + 1
)); }); if ((ys->xpath)) { ({ static const struct xref_assert
_xref __attribute__( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT
), 1353, "lib/northbound_oper.c", __func__, }, .expr = "(long long)darr_cap(ys->xpath) >= (long long)(len + 1)"
, }; static const struct xref * const xref_p_484 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
(((long long)(((ys->xpath) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->xpath)) - 1)->cap) >= (long long)(len + 1)) ?
0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0));
} while ((long long)(((ys->xpath) == ((void*)0)) ? 0 : ((
(struct darr_metadata *)(ys->xpath)) - 1)->cap) >= (
long long)(len + 1)); }); (((struct darr_metadata *)(ys->xpath
)) - 1)->len = (len + 1); } } while (0)
;
1354 ys->xpath[len] = 0;
1355 ni->xpath_len = len;
1356 }
1357
1358 /* Need to get keys. */
1359
1360 if (!CHECK_FLAG(nn->flags, F_NB_NODE_KEYLESS_LIST)((nn->flags) & (0x02))) {
1361 ret = nb_callback_get_keys(nn, list_entry,
1362 &ni->keys);
1363 if (ret) {
1364 darr_pop(ys->node_infos)({ uint __len = (((struct darr_metadata *)(ys->node_infos)
) - 1)->len; ({ static const struct xref_assert _xref __attribute__
( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1364, "lib/northbound_oper.c"
, __func__, }, .expr = "__len", }; static const struct xref *
const xref_p_485 __attribute__((used, section("xref_array"))
) = &(_xref.xref); if (__builtin_expect((__len) ? 0 : 1, 0
)) do { _zlog_assert_failed(&_xref, ((void*)0)); } while (
__len); }); do { uint __i = (__len - 1); uint __n = (1); uint
__len = (((ys->node_infos) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(ys->node_infos)) - 1)->len); if (!__len
) break; else if (__i + __n < __len) { memmove(&(ys->
node_infos)[__i], &(ys->node_infos)[__i + __n], sizeof
((ys->node_infos)[0]) * (__len - (__i + __n))); (((struct darr_metadata
*)(ys->node_infos)) - 1)->len = __len - __n; } else ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len = __i
; } while (0); (ys->node_infos)[__len - 1]; })
;
1365 ret = NB_ERR_RESOURCE;
1366 goto done;
1367 }
1368 }
1369 /*
1370 * Append predicates to xpath.
1371 */
1372 len = darr_strlen(ys->xpath)({ uint __size = (((ys->xpath) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(ys->xpath)) - 1)->len); if (__size) __size
-= 1; ({ static const struct xref_assert _xref __attribute__
( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1372, "lib/northbound_oper.c"
, __func__, }, .expr = "!(ys->xpath) || ((char *)(ys->xpath))[__size] == 0"
, }; static const struct xref * const xref_p_486 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((!(ys->xpath) || ((char *)(ys->xpath))[__size] == 0) ?
0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0));
} while (!(ys->xpath) || ((char *)(ys->xpath))[__size]
== 0); }); __size; })
;
1373 if (ni->keys.num) {
1374 yang_get_key_preds(ys->xpath + len, sib,
1375 &ni->keys,
1376 darr_cap(ys->xpath)(((ys->xpath) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->xpath)) - 1)->cap)
- len);
1377 } else {
1378 /* add a position predicate (1-based?) */
1379 darr_ensure_avail(ys->xpath, 10)({ ssize_t need = (ssize_t)(10) - (ssize_t)((((ys->xpath) ==
((void*)0)) ? 0 : (((struct darr_metadata *)(ys->xpath)) -
1)->cap) - (((ys->xpath) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(ys->xpath)) - 1)->len)); if (need >
0) ({ ((ys->xpath)) = __darr_resize((ys->xpath), (((ys
->xpath) == ((void*)0)) ? 0 : (((struct darr_metadata *)(ys
->xpath)) - 1)->cap) + need, sizeof(((ys->xpath))[0]
), MTYPE_DARR); }); (ys->xpath); })
;
1380 snprintf(ys->xpath + len,
1381 darr_cap(ys->xpath)(((ys->xpath) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->xpath)) - 1)->cap)
- len + 1, "[%u]",
1382 ni->nents + 1);
1383 }
1384 darr_setlen(ys->xpath,do { ({ static const struct xref_assert _xref __attribute__( (
used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1385, "lib/northbound_oper.c"
, __func__, }, .expr = "(ys->xpath) || !(strlen(ys->xpath + len) + len + 1)"
, }; static const struct xref * const xref_p_487 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
(((ys->xpath) || !(strlen(ys->xpath + len) + len + 1)) ?
0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0));
} while ((ys->xpath) || !(strlen(ys->xpath + len) + len
+ 1)); }); if ((ys->xpath)) { ({ static const struct xref_assert
_xref __attribute__( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT
), 1385, "lib/northbound_oper.c", __func__, }, .expr = "(long long)darr_cap(ys->xpath) >= (long long)(strlen(ys->xpath + len) + len + 1)"
, }; static const struct xref * const xref_p_488 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
(((long long)(((ys->xpath) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->xpath)) - 1)->cap) >= (long long)(strlen(ys->
xpath + len) + len + 1)) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while ((long long)(((ys->xpath
) == ((void*)0)) ? 0 : (((struct darr_metadata *)(ys->xpath
)) - 1)->cap) >= (long long)(strlen(ys->xpath + len)
+ len + 1)); }); (((struct darr_metadata *)(ys->xpath)) -
1)->len = (strlen(ys->xpath + len) + len + 1); } } while
(0)
1385 strlen(ys->xpath + len) + len + 1)do { ({ static const struct xref_assert _xref __attribute__( (
used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1385, "lib/northbound_oper.c"
, __func__, }, .expr = "(ys->xpath) || !(strlen(ys->xpath + len) + len + 1)"
, }; static const struct xref * const xref_p_487 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
(((ys->xpath) || !(strlen(ys->xpath + len) + len + 1)) ?
0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0));
} while ((ys->xpath) || !(strlen(ys->xpath + len) + len
+ 1)); }); if ((ys->xpath)) { ({ static const struct xref_assert
_xref __attribute__( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT
), 1385, "lib/northbound_oper.c", __func__, }, .expr = "(long long)darr_cap(ys->xpath) >= (long long)(strlen(ys->xpath + len) + len + 1)"
, }; static const struct xref * const xref_p_488 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
(((long long)(((ys->xpath) == ((void*)0)) ? 0 : (((struct darr_metadata
*)(ys->xpath)) - 1)->cap) >= (long long)(strlen(ys->
xpath + len) + len + 1)) ? 0 : 1, 0)) do { _zlog_assert_failed
(&_xref, ((void*)0)); } while ((long long)(((ys->xpath
) == ((void*)0)) ? 0 : (((struct darr_metadata *)(ys->xpath
)) - 1)->cap) >= (long long)(strlen(ys->xpath + len)
+ len + 1)); }); (((struct darr_metadata *)(ys->xpath)) -
1)->len = (strlen(ys->xpath + len) + len + 1); } } while
(0)
;
1386 ni->xpath_len = darr_strlen(ys->xpath)({ uint __size = (((ys->xpath) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(ys->xpath)) - 1)->len); if (__size) __size
-= 1; ({ static const struct xref_assert _xref __attribute__
( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1386, "lib/northbound_oper.c"
, __func__, }, .expr = "!(ys->xpath) || ((char *)(ys->xpath))[__size] == 0"
, }; static const struct xref * const xref_p_489 __attribute__
((used, section("xref_array"))) = &(_xref.xref); if (__builtin_expect
((!(ys->xpath) || ((char *)(ys->xpath))[__size] == 0) ?
0 : 1, 0)) do { _zlog_assert_failed(&_xref, ((void*)0));
} while (!(ys->xpath) || ((char *)(ys->xpath))[__size]
== 0); }); __size; })
;
1387
1388 /*
1389 * Create the new list entry node.
1390 */
1391
1392 if (!node) {
1393 /* NOTE: can also use lyd_new_list2 here when available */
1394 err = yang_lyd_new_list(ni[-1].inner, sib,
1395 &ni->keys,
1396 (struct lyd_node_inner *
1397 *)&node);
1398 if (err) {
1399 darr_pop(ys->node_infos)({ uint __len = (((struct darr_metadata *)(ys->node_infos)
) - 1)->len; ({ static const struct xref_assert _xref __attribute__
( (used)) = { .xref = { (((void*)0)), (XREFT_ASSERT), 1399, "lib/northbound_oper.c"
, __func__, }, .expr = "__len", }; static const struct xref *
const xref_p_490 __attribute__((used, section("xref_array"))
) = &(_xref.xref); if (__builtin_expect((__len) ? 0 : 1, 0
)) do { _zlog_assert_failed(&_xref, ((void*)0)); } while (
__len); }); do { uint __i = (__len - 1); uint __n = (1); uint
__len = (((ys->node_infos) == ((void*)0)) ? 0 : (((struct
darr_metadata *)(ys->node_infos)) - 1)->len); if (!__len
) break; else if (__i + __n < __len) { memmove(&(ys->
node_infos)[__i], &(ys->node_infos)[__i + __n], sizeof
((ys->node_infos)[0]) * (__len - (__i + __n))); (((struct darr_metadata
*)(ys->node_infos)) - 1)->len = __len - __n; } else ((
(struct darr_metadata *)(ys->node_infos)) - 1)->len = __i
; } while (0); (ys->node_infos)[__len - 1]; })
;
1400 ret = NB_ERR_RESOURCE;
1401 goto done;
1402 }
1403 }
1404
1405 /*
1406 * Save the new list entry with the list node info
1407 */
1408 ni->inner = (struct lyd_node_inner *)node;
1409 ni->schema = node->schema;
1410 ni->list_entry = list_entry;
1411 ni->niters += 1;
1412 ni->nents += 1;
1413
1414 /* Skip over the key children, they've been created. */
1415 sib = nb_op_sib_first(ys, sib);
1416 continue;
1417
1418 case LYS_CHOICE0x0002:
1419 /* Container type with no data */
1420 /*FALLTHROUGH*/
1421 case LYS_CASE0x0080:
1422 /* Container type with no data */
1423 /*FALLTHROUGH*/
1424 default:
1425 /*FALLTHROUGH*/
1426 case LYS_ANYXML0x0020:
1427 case LYS_ANYDATA0x0060:
1428 /* These schema types are not currently handled */
1429 flog_warn(EC_LIB_NB_OPERATIONAL_DATA,
1430 "%s: unsupported schema node type: %s",
1431 __func__, lys_nodetype2str(sib->nodetype));
1432 sib = nb_op_sib_next(ys, sib);
1433 continue;
1434 }
1435 }
1436
1437done:
1438 darr_free(xpath_child);
1439 return ret;
1440}
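
Many of the lines above and below use the darr_*() dynamic-array helpers from lib/darr.h (darr_lasti, darr_last, darr_append_n, darr_strdup, darr_free). Each such array stores a metadata header (length, capacity, allocation type) immediately before element 0 of the user-visible pointer. The following is only a minimal sketch of that layout; the field order and the *_sketch names are assumptions for illustration, not the literal lib/darr.h definitions.

struct darr_metadata_sketch {
	unsigned int len;	/* number of valid elements in the array */
	unsigned int cap;	/* allocated element capacity */
	struct memtype *mtype;	/* allocation type, used when freeing */
};

/* darr_lasti(A): index of the last element, or -1 for a NULL (empty) array. */
static inline long darr_lasti_sketch(void *a)
{
	return a ? (long)(((struct darr_metadata_sketch *)a) - 1)->len - 1 : -1;
}
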
1441
1442static void nb_op_walk_continue(struct event *thread)
1443{
1444 struct nb_op_yield_state *ys = EVENT_ARG(thread);
1445 enum nb_error ret = NB_OK;
1446
1447 DEBUGD(&nb_dbg_cbs_state, "northbound oper-state: resuming %s",
1448 ys->xpath);
1449
1450 nb_op_resume_data_tree(ys);
1451
1452 /* if we've popped past the walk start level we're done */
1453 if (darr_lasti(ys->node_infos) < ys->walk_root_level)
1454 goto finish;
1455
1456 /* otherwise we are at a resumable node */
1457 assert(darr_last(ys->node_infos)->has_lookup_next);
1458
1459 ret = __walk(ys, true);
1460 if (ret == NB_YIELD) {
1461 if (nb_op_yield(ys) != NB_OK) {
1462 if (ys->should_batch)
1463 goto stopped;
1464 else
1465 goto finish;
1466 }
1467 return;
1468 }
1469finish:
1470 (*ys->finish)(ys_root_node(ys), ys->finish_arg, ret);
1471stopped:
1472 nb_op_free_yield_state(ys, false);
1473}
1474
1475static void __free_siblings(struct lyd_node *this)
1476{
1477 struct lyd_node *next, *sib;
1478 uint count = 0;
1479
1480 LY_LIST_FOR_SAFE(lyd_first_sibling(this), next, sib)
1481 {
1482 if (lysc_is_key(sib->schema))
1483 continue;
1484 if (sib == this)
1485 continue;
1486 lyd_free_tree(sib);
1487 count++;
1488 }
1489 DEBUGD(&nb_dbg_events, "NB oper-state: deleted %u siblings", count);
1490}
1491
1492/*
1493 * Trim Algorithm:
1494 *
1495 * Delete final lookup-next list node and subtree, leave stack slot with keys.
1496 *
1497 * Then walking up the stack, delete all siblings except:
1498 * 1. right-most container or list node (must be lookup-next by design)
1499 * 2. keys supporting existing parent list node.
1500 *
1501 * NOTE the topmost node on the stack will be the final lookup-next list node,
1502 * as we only yield on lookup-next list nodes.
1503 *
1504 */
1505static void nb_op_trim_yield_state(struct nb_op_yield_state *ys)
1506{
1507 struct nb_op_node_info *ni;
1508 int i = darr_lasti(ys->node_infos);
1509
1510 assert(i >= 0);
1511
1512 DEBUGD(&nb_dbg_events, "NB oper-state: start trimming: top: %d", i);
1513
1514 ni = &ys->node_infos[i];
1515 assert(ni->has_lookup_next);
1516
1517 DEBUGD(&nb_dbg_events, "NB oper-state: deleting tree at level %d", i);
1518 __free_siblings(&ni->inner->node);
1519 lyd_free_tree(&ni->inner->node);
1520 ni->inner = NULL;
1521
1522 while (--i > 0) {
1523 DEBUGD(&nb_dbg_events,
1524 "NB oper-state: deleting siblings at level: %d", i);
1525 __free_siblings(&ys->node_infos[i].inner->node);
1526 }
1527 DEBUGD(&nb_dbg_events, "NB oper-state: stop trimming: new top: %d",
1528 (int)darr_lasti(ys->node_infos));
1529}
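
As a concrete reading of the trim comment above (lines 1492-1504), consider a hypothetical yield state; the stack contents here are invented purely for illustration and are not taken from this report.

/*
 *   node_infos[0]  container "routing-state"          kept as-is
 *   node_infos[1]  list      "rib[afi-safi='ipv4']"   siblings freed, keys kept
 *   node_infos[2]  list      "route[...]"             lookup-next node: whole
 *                                                     subtree freed, slot retained
 *                                                     so the walk can resume here
 */

Lines 1518-1520 implement the first phase (drop the final lookup-next list subtree), and the loop at 1522-1526 implements the second (free the already "finished" siblings at each remaining level above the root), so the next batch resumes from a minimal tree.
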
1530
1531static enum nb_error nb_op_yield(struct nb_op_yield_state *ys)
1532{
1533 enum nb_error ret;
1534 unsigned long min_us = MAX(1, NB_OP_WALK_INTERVAL_US / 50000);
1535 struct timeval tv = { .tv_sec = 0, .tv_usec = min_us };
1536
1537 DEBUGD(&nb_dbg_events, "NB oper-state: yielding %s for %lus (should_batch %d)",
1538 ys->xpath, tv.tv_usec, ys->should_batch);
1539
1540 if (ys->should_batch) {
1541 /*
1542 * TODO: add ability of finish to influence the timer.
1543 * This will allow, for example, flow control based on how long
1544 * it takes finish to process the batch.
1545 */
1546 ret = (*ys->finish)(ys_root_node(ys), ys->finish_arg, NB_YIELD);
1547 if (ret != NB_OK)
1548 return ret;
1549 /* now trim out that data we just "finished" */
1550 nb_op_trim_yield_state(ys);
1551
1552 }
1553
1554 event_add_timer_tv(event_loop, nb_op_walk_continue, ys, &tv,
1555 &ys->walk_ev);
1556 return NB_OK;
1557}
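
For reference, the yield interval computed on line 1534 works out as follows (NB_OP_WALK_INTERVAL_US is 50 * 1000 in this build):

	min_us = MAX(1, NB_OP_WALK_INTERVAL_US / 50000)
	       = MAX(1, 50000 / 50000)
	       = 1

so the continuation timer armed on line 1554 fires after roughly one microsecond, i.e. essentially on the next pass through the event loop.
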
1558
1559static enum nb_error nb_op_ys_init_schema_path(struct nb_op_yield_state *ys,
1560 struct nb_node **last)
1561{
1562 const struct lysc_node *sn;
1563 struct nb_node *nblast;
1564 char *s, *s2;
1565 int count;
1566 uint i;
1567
1568 /*
1569 * Get the schema node stack for the entire query string
1570 *
1571 * The user might pass in something like "//metric" which may resolve to
1572 * more than one schema node ("trunks"). nb_node_find() returns a single
1573 * node though. We should expand the functionality to get the set of
1574 * nodes that match the xpath (not path) query and save that set in
1575 * the yield state. Then we should do a walk using the users query
1576 * string over each schema trunk in the set.
1577 */
1578 nblast = nb_node_find(ys->xpath);
1579 if (!nblast) {
1. Assuming 'nblast' is non-null
2. Taking false branch
1580 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1581 "%s: unknown data path: %s", __func__, ys->xpath);
1582 return NB_ERR;
1583 }
1584 *last = nblast;
1585
1586 /*
1587 * Create a stack of schema nodes one element per node in the query
1588 * path, only the top (last) element may be a non-container type.
1589 *
1590 * NOTE: appears to be a bug in nb_node linkage where parent can be NULL,
1591 * or I'm misunderstanding the code, in any case we use the libyang
1592 * linkage to walk which works fine.
1593 *
1594 * XXX: we don't actually support choice/case yet, they are container
1595 * types in the libyang schema, but won't be in data so our length
1596 * checking gets messed up.
1597 */
1598 for (sn = nblast->snode, count = 0; sn; count++, sn = sn->parent)
1599 if (sn != nblast->snode)
1600 assert(CHECK_FLAG(sn->nodetype,
1601 LYS_CONTAINER | LYS_LIST |
1602 LYS_CHOICE | LYS_CASE));
1603 /* create our arrays */
1604 darr_append_n(ys->schema_path, count);
3. Loop condition is false. Execution continues on line 1604
4. Assuming field 'schema_path' is not equal to null
5. '?' condition is false
6. '?' condition is false
7. Assuming the condition is false
8. Taking false branch
9. Taking false branch
1605 darr_append_n(ys->query_tokens, count);
10. Assuming field 'query_tokens' is not equal to null
11. '?' condition is false
12. '?' condition is false
13. Assuming the condition is false
14. Taking false branch
15. Taking false branch
1606 for (sn = nblast->snode; sn; sn = sn->parent)
16. Loop condition is false. Execution continues on line 1614
1607 ys->schema_path[--count] = sn;
1608
1609 /*
1610 * Now tokenize the query string and get pointers to each token
1611 */
1612
1613 /* Get copy of query string start after initial '/'s */
1614 s = ys->xpath;
1615 while (*s && *s == '/')
17. Assuming the condition is true
18. Assuming the condition is true
19. Loop condition is true. Entering loop body
20. Assuming the condition is false
1616 s++;
1617 ys->query_tokstr = darr_strdup(s);
21. '__s' initialized to a null pointer value
22. '?' condition is true
23. '?' condition is false
24. Assuming the condition is false
25. Taking false branch
26. '?' condition is true
27. Null pointer passed as 1st argument to string copy function
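
The darr_strdup() expansion the analyzer is reasoning about on line 1617 reduces to roughly the sketch below. This is a simplification (internal asserts dropped, the temporary renamed to buf); darr_cap(), __darr_resize() and MTYPE_DARR_STR are taken from the expansion itself.

	size_t size = strlen(s) + 1;
	char *buf = NULL;                            /* step 21: '__s' starts out NULL */
	if ((ssize_t)darr_cap(buf) < (ssize_t)size)  /* step 24: guard assumed false */
		buf = __darr_resize(buf, size, sizeof(buf[0]), MTYPE_DARR_STR);
	strlcpy(buf, s, darr_cap(buf));              /* step 27: NULL 1st argument */
	ys->query_tokstr = buf;

Since darr_cap(NULL) is 0, skipping the resize requires strlen(s) + 1 to be 0, or to become non-positive when cast to ssize_t; on that path buf is still NULL when strlcpy() is called, which is the warning reported at line 1617, column 21.
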
1618 s = ys->query_tokstr;
1619
1620 darr_foreach_i (ys->schema_path, i) {
1621 const char *modname = ys->schema_path[i]->module->name;
1622 const char *name = ys->schema_path[i]->name;
1623 int nlen = strlen(name);
1624 int mnlen = 0;
1625
1626 while (true1) {
1627 s2 = strstr(s, name);
1628 if (!s2)
1629 goto error;
1630
1631 if (s2[-1] == ':') {
1632 mnlen = strlen(modname) + 1;
1633 if (ys->query_tokstr > s2 - mnlen ||
1634 strncmp(s2 - mnlen, modname, mnlen - 1))
1635 goto error;
1636 s2 -= mnlen;
1637 nlen += mnlen;
1638 }
1639
1640 s = s2;
1641 if ((i == 0 || s[-1] == '/') &&
1642 (s[nlen] == 0 || s[nlen] == '[' || s[nlen] == '/'))
1643 break;
1644 /*
1645 * Advance past the incorrect match, must have been
1646 * part of previous predicate.
1647 */
1648 s += nlen;
1649 }
1650
1651 /* NUL terminate previous token and save this one */
1652 if (i > 0)
1653 s[-1] = 0;
1654 ys->query_tokens[i] = s;
1655 s += nlen;
1656 }
1657
1658 /* NOTE: need to subtract choice/case nodes when these are supported */
1659 ys->query_base_level = darr_lasti(ys->schema_path);
1660
1661 return NB_OK;
1662
1663error:
1664 darr_free(ys->query_tokstr);
1665 darr_free(ys->schema_path);
1666 darr_free(ys->query_tokens);
1667 return NB_ERR;
1668}
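
As a worked example of the tokenization loop above, the query string and node names below are hypothetical, chosen only to illustrate the result:

/*
 *   ys->xpath         "/frr-interface:lib/interface[name='eth0']/state"
 *   ys->query_tokstr  "frr-interface:lib\0interface[name='eth0']\0state"
 *   ys->query_tokens  [0] "frr-interface:lib"
 *                     [1] "interface[name='eth0']"
 *                     [2] "state"            (pointers into query_tokstr)
 *   ys->query_base_level = darr_lasti(ys->schema_path) = 2
 */

Each token keeps any predicate that follows its node name; the separating '/' characters are overwritten with NUL bytes by line 1653.
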
1669
1670
1671/**
1672 * nb_op_walk_start() - Start walking oper-state directed by query string.
1673 * @ys: partially initialized yield state for this walk.
1674 *
1675 */
1676static enum nb_error nb_op_walk_start(struct nb_op_yield_state *ys)
1677{
1678 struct nb_node *nblast;
1679 enum nb_error ret;
1680
1681 /*
1682 * Get nb_node path (stack) corresponding to the xpath query
1683 */
1684 ret = nb_op_ys_init_schema_path(ys, &nblast);
1685 if (ret != NB_OK)
1686 return ret;
1687
1688
1689 /*
1690 * Get the node_info path (stack) corresponding to the uniquely
1691 * resolvable data nodes from the beginning of the xpath query.
1692 */
1693 ret = nb_op_ys_init_node_infos(ys);
1694 if (ret != NB_OK)
1695 return ret;
1696
1697 return __walk(ys, false);
1698}
1699
1700
1701void *nb_oper_walk(const char *xpath, struct yang_translator *translator,
1702 uint32_t flags, bool should_batch, nb_oper_data_cb cb,
1703 void *cb_arg, nb_oper_data_finish_cb finish, void *finish_arg)
1704{
1705 struct nb_op_yield_state *ys;
1706 enum nb_error ret;
1707
1708 ys = nb_op_create_yield_state(xpath, translator, flags, should_batch,
1709 cb, cb_arg, finish, finish_arg);
1710
1711 ret = nb_op_walk_start(ys);
1712 if (ret == NB_YIELD) {
1713 if (nb_op_yield(ys) != NB_OK) {
1714 if (ys->should_batch)
1715 goto stopped;
1716 else
1717 goto finish;
1718 }
1719 return ys;
1720 }
1721finish:
1722 (void)(*ys->finish)(ys_root_node(ys), ys->finish_arg, ret);
1723stopped:
1724 nb_op_free_yield_state(ys, false);
1725 return NULL;
1726}
1727
1728
1729void nb_oper_cancel_walk(void *walk)
1730{
1731 if (walk)
1732 nb_op_free_yield_state(walk, false);
1733}
1734
1735
1736void nb_oper_cancel_all_walks(void)
1737{
1738 struct nb_op_yield_state *ys;
1739
1740 frr_each_safe (nb_op_walks, &nb_op_walks, ys)
1741 nb_oper_cancel_walk(ys);
1742}
1743
1744
1745/*
1746 * The old API -- remove when we've updated the users to yielding.
1747 */
1748enum nb_error nb_oper_iterate_legacy(const char *xpath,
1749 struct yang_translator *translator,
1750 uint32_t flags, nb_oper_data_cb cb,
1751 void *cb_arg, struct lyd_node **tree)
1752{
1753 struct nb_op_yield_state *ys;
1754 enum nb_error ret;
1755
1756 ys = nb_op_create_yield_state(xpath, translator, flags, false, cb,
1757 cb_arg, NULL, NULL);
1758
1759 ret = nb_op_walk_start(ys);
1760 assert(ret != NB_YIELD);
1761
1762 if (tree && ret == NB_OK)
1763 *tree = ys_root_node(ys);
1764 else {
1765 if (ys_root_node(ys))
1766 yang_dnode_free(ys_root_node(ys));
1767 if (tree)
1768 *tree = NULL;
1769 }
1770
1771 nb_op_free_yield_state(ys, true);
1772 return ret;
1773}
1774
1775void nb_oper_init(struct event_loop *loop)
1776{
1777 event_loop = loop;
1778 nb_op_walks_init(&nb_op_walks);
1779}
1780
1781void nb_oper_terminate(void)
1782{
1783 nb_oper_cancel_all_walks();
1784}