(struct ovs_list) { (struct ovs_list *) (uintptr_t) 0xccccccccccccccccULL, \
(struct ovs_list *) (uintptr_t) 0xccccccccccccccccULL }
-static inline void list_init(struct ovs_list *);
-static inline void list_poison(struct ovs_list *);
+static inline void ovs_list_init(struct ovs_list *);
+static inline void ovs_list_poison(struct ovs_list *);
/* List insertion. */
-static inline void list_insert(struct ovs_list *, struct ovs_list *);
-static inline void list_splice(struct ovs_list *before, struct ovs_list *first,
+static inline void ovs_list_insert(struct ovs_list *, struct ovs_list *);
+static inline void ovs_list_splice(struct ovs_list *before, struct ovs_list *first,
struct ovs_list *last);
-static inline void list_push_front(struct ovs_list *, struct ovs_list *);
-static inline void list_push_back(struct ovs_list *, struct ovs_list *);
-static inline void list_replace(struct ovs_list *, const struct ovs_list *);
-static inline void list_moved(struct ovs_list *, const struct ovs_list *orig);
-static inline void list_move(struct ovs_list *dst, struct ovs_list *src);
+static inline void ovs_list_push_front(struct ovs_list *, struct ovs_list *);
+static inline void ovs_list_push_back(struct ovs_list *, struct ovs_list *);
+static inline void ovs_list_replace(struct ovs_list *, const struct ovs_list *);
+static inline void ovs_list_moved(struct ovs_list *, const struct ovs_list *orig);
+static inline void ovs_list_move(struct ovs_list *dst, struct ovs_list *src);
/* List removal. */
-static inline struct ovs_list *list_remove(struct ovs_list *);
-static inline struct ovs_list *list_pop_front(struct ovs_list *);
-static inline struct ovs_list *list_pop_back(struct ovs_list *);
+static inline struct ovs_list *ovs_list_remove(struct ovs_list *);
+static inline struct ovs_list *ovs_list_pop_front(struct ovs_list *);
+static inline struct ovs_list *ovs_list_pop_back(struct ovs_list *);
/* List elements. */
-static inline struct ovs_list *list_front(const struct ovs_list *);
-static inline struct ovs_list *list_back(const struct ovs_list *);
+static inline struct ovs_list *ovs_list_front(const struct ovs_list *);
+static inline struct ovs_list *ovs_list_back(const struct ovs_list *);
/* List properties. */
-static inline size_t list_size(const struct ovs_list *);
-static inline bool list_is_empty(const struct ovs_list *);
-static inline bool list_is_singleton(const struct ovs_list *);
-static inline bool list_is_short(const struct ovs_list *);
+static inline size_t ovs_list_size(const struct ovs_list *);
+static inline bool ovs_list_is_empty(const struct ovs_list *);
+static inline bool ovs_list_is_singleton(const struct ovs_list *);
+static inline bool ovs_list_is_short(const struct ovs_list *);
#define LIST_FOR_EACH(ITER, MEMBER, LIST) \
for (INIT_CONTAINER(ITER, (LIST)->next, MEMBER); \
: 0); \
(ITER) = (NEXT))
#define LIST_FOR_EACH_POP(ITER, MEMBER, LIST) \
- while (!list_is_empty(LIST) \
- && (INIT_CONTAINER(ITER, list_pop_front(LIST), MEMBER), 1))
+ while (!ovs_list_is_empty(LIST) \
+ && (INIT_CONTAINER(ITER, ovs_list_pop_front(LIST), MEMBER), 1))
\f
/* Inline implementations. */
/* Initializes 'list' as an empty list. */
static inline void
-list_init(struct ovs_list *list)
+ovs_list_init(struct ovs_list *list)
{
list->next = list->prev = list;
}
/* Initializes 'list' with pointers that will (probably) cause segfaults if
* dereferenced and, better yet, show up clearly in a debugger. */
static inline void
-list_poison(struct ovs_list *list)
+ovs_list_poison(struct ovs_list *list)
{
*list = OVS_LIST_POISON;
}
/* Inserts 'elem' just before 'before'. */
static inline void
-list_insert(struct ovs_list *before, struct ovs_list *elem)
+ovs_list_insert(struct ovs_list *before, struct ovs_list *elem)
{
elem->prev = before->prev;
elem->next = before;
/* Removes elements 'first' though 'last' (exclusive) from their current list,
then inserts them just before 'before'. */
static inline void
-list_splice(struct ovs_list *before, struct ovs_list *first, struct ovs_list *last)
+ovs_list_splice(struct ovs_list *before, struct ovs_list *first, struct ovs_list *last)
{
if (first == last) {
return;
/* Inserts 'elem' at the beginning of 'list', so that it becomes the front in
'list'. */
static inline void
-list_push_front(struct ovs_list *list, struct ovs_list *elem)
+ovs_list_push_front(struct ovs_list *list, struct ovs_list *elem)
{
- list_insert(list->next, elem);
+ ovs_list_insert(list->next, elem);
}
/* Inserts 'elem' at the end of 'list', so that it becomes the back in
 * 'list'. */
static inline void
ovs_list_push_back(struct ovs_list *list, struct ovs_list *elem)
{
    /* Inserting just before the head places 'elem' at the tail of the
     * circular list. */
    ovs_list_insert(list, elem);
}
/* Puts 'elem' in the position currently occupied by 'position'.
* Afterward, 'position' is not part of a list. */
static inline void
-list_replace(struct ovs_list *element, const struct ovs_list *position)
+ovs_list_replace(struct ovs_list *element, const struct ovs_list *position)
{
element->next = position->next;
element->next->prev = element;
* language lawyer sense, this still yields undefined behavior, but it works
* with actual compilers.) */
static inline void
-list_moved(struct ovs_list *list, const struct ovs_list *orig)
+ovs_list_moved(struct ovs_list *list, const struct ovs_list *orig)
{
if (list->next == orig) {
- list_init(list);
+ ovs_list_init(list);
} else {
list->prev->next = list->next->prev = list;
}
* around in memory. The effect is that, if 'src' was the head of a list, now
* 'dst' is the head of a list containing the same elements. */
static inline void
-list_move(struct ovs_list *dst, struct ovs_list *src)
+ovs_list_move(struct ovs_list *dst, struct ovs_list *src)
{
*dst = *src;
- list_moved(dst, src);
+ ovs_list_moved(dst, src);
}
/* Removes 'elem' from its list and returns the element that followed it.
Undefined behavior if 'elem' is not in a list. */
static inline struct ovs_list *
-list_remove(struct ovs_list *elem)
+ovs_list_remove(struct ovs_list *elem)
{
elem->prev->next = elem->next;
elem->next->prev = elem->prev;
/* Removes the front element from 'list' and returns it. Undefined behavior if
'list' is empty before removal. */
static inline struct ovs_list *
-list_pop_front(struct ovs_list *list)
+ovs_list_pop_front(struct ovs_list *list)
{
struct ovs_list *front = list->next;
- list_remove(front);
+ ovs_list_remove(front);
return front;
}
/* Removes the back element from 'list' and returns it.
Undefined behavior if 'list' is empty before removal. */
static inline struct ovs_list *
-list_pop_back(struct ovs_list *list)
+ovs_list_pop_back(struct ovs_list *list)
{
struct ovs_list *back = list->prev;
- list_remove(back);
+ ovs_list_remove(back);
return back;
}
/* Returns the front element in 'list_'.
Undefined behavior if 'list_' is empty. */
static inline struct ovs_list *
-list_front(const struct ovs_list *list_)
+ovs_list_front(const struct ovs_list *list_)
{
struct ovs_list *list = CONST_CAST(struct ovs_list *, list_);
- ovs_assert(!list_is_empty(list));
+ ovs_assert(!ovs_list_is_empty(list));
return list->next;
}
/* Returns the back element in 'list_'.
Undefined behavior if 'list_' is empty. */
static inline struct ovs_list *
-list_back(const struct ovs_list *list_)
+ovs_list_back(const struct ovs_list *list_)
{
struct ovs_list *list = CONST_CAST(struct ovs_list *, list_);
- ovs_assert(!list_is_empty(list));
+ ovs_assert(!ovs_list_is_empty(list));
return list->prev;
}
/* Returns the number of elements in 'list'.
Runs in O(n) in the number of elements. */
static inline size_t
-list_size(const struct ovs_list *list)
+ovs_list_size(const struct ovs_list *list)
{
const struct ovs_list *e;
size_t cnt = 0;
/* Returns true if 'list' is empty, false otherwise. */
static inline bool
-list_is_empty(const struct ovs_list *list)
+ovs_list_is_empty(const struct ovs_list *list)
{
return list->next == list;
}
/* Returns true if 'list' has exactly 1 element, false otherwise. */
static inline bool
ovs_list_is_singleton(const struct ovs_list *list)
{
    /* "At most one element" minus "zero elements" leaves exactly one. */
    return !ovs_list_is_empty(list) && ovs_list_is_short(list);
}
/* Returns true if 'list' has 0 or 1 elements, false otherwise. */
static inline bool
-list_is_short(const struct ovs_list *list)
+ovs_list_is_short(const struct ovs_list *list)
{
return list->next == list->prev;
}
ovs_mutex_init(&pmd->poll_mutex);
dpcls_init(&pmd->cls);
cmap_init(&pmd->flow_table);
- list_init(&pmd->poll_list);
+ ovs_list_init(&pmd->poll_list);
/* init the 'flow_cache' since there is no
* actual thread created for NON_PMD_CORE_ID. */
if (core_id == NON_PMD_CORE_ID) {
if (poll->port == port) {
found = true;
port_unref(poll->port);
- list_remove(&poll->node);
+ ovs_list_remove(&poll->node);
pmd->poll_cnt--;
free(poll);
}
poll->port = port;
poll->rx = rx;
- list_push_back(&pmd->poll_list, &poll->node);
+ ovs_list_push_back(&pmd->poll_list, &poll->node);
pmd->poll_cnt++;
}
abort();
}
- list_remove(&slot->list_node);
+ ovs_list_remove(&slot->list_node);
free_cacheline(slot);
}
ovsthread_key_create(&rwlock->key, slot_destructor);
ovs_mutex_init(&rwlock->mutex);
ovs_mutex_lock(&rwlock->mutex);
- list_init(&rwlock->threads);
+ ovs_list_init(&rwlock->threads);
ovs_mutex_unlock(&rwlock->mutex);
}
slot->depth = 0;
ovs_mutex_lock(&rwlock->mutex);
- list_push_back(&rwlock->threads, &slot->list_node);
+ ovs_list_push_back(&rwlock->threads, &slot->list_node);
ovs_mutex_unlock(&rwlock->mutex);
ovsthread_setspecific(rwlock->key, slot);
ovs_mutex_init(&slot->mutex);
slot->depth = 0;
- list_push_back(&rwlock->threads, &slot->list_node);
+ ovs_list_push_back(&rwlock->threads, &slot->list_node);
ovs_mutex_unlock(&rwlock->mutex);
ovsthread_setspecific(rwlock->key, slot);
}
guarded_list_init(struct guarded_list *list)
{
ovs_mutex_init(&list->mutex);
- list_init(&list->list);
+ ovs_list_init(&list->list);
list->n = 0;
}
ovs_mutex_lock(&list->mutex);
if (list->n < max) {
- list_push_back(&list->list, node);
+ ovs_list_push_back(&list->list, node);
retval = ++list->n;
}
ovs_mutex_unlock(&list->mutex);
ovs_mutex_lock(&list->mutex);
if (list->n) {
- node = list_pop_front(&list->list);
+ node = ovs_list_pop_front(&list->list);
list->n--;
}
ovs_mutex_unlock(&list->mutex);
size_t n;
ovs_mutex_lock(&list->mutex);
- list_move(elements, &list->list);
+ ovs_list_move(elements, &list->list);
n = list->n;
- list_init(&list->list);
+ ovs_list_init(&list->list);
list->n = 0;
ovs_mutex_unlock(&list->mutex);
rpc->name = xstrdup(stream_get_name(stream));
rpc->stream = stream;
byteq_init(&rpc->input, rpc->input_buffer, sizeof rpc->input_buffer);
- list_init(&rpc->output);
+ ovs_list_init(&rpc->output);
return rpc;
}
}
stream_run(rpc->stream);
- while (!list_is_empty(&rpc->output)) {
+ while (!ovs_list_is_empty(&rpc->output)) {
struct ofpbuf *buf = ofpbuf_from_list(rpc->output.next);
int retval;
rpc->backlog -= retval;
ofpbuf_pull(buf, retval);
if (!buf->size) {
- list_remove(&buf->list_node);
+ ovs_list_remove(&buf->list_node);
rpc->output_count--;
ofpbuf_delete(buf);
}
{
if (!rpc->status) {
stream_run_wait(rpc->stream);
- if (!list_is_empty(&rpc->output)) {
+ if (!ovs_list_is_empty(&rpc->output)) {
stream_send_wait(rpc->stream);
}
}
buf = xmalloc(sizeof *buf);
ofpbuf_use_ds(buf, &ds);
- list_push_back(&rpc->output, &buf->list_node);
+ ovs_list_push_back(&rpc->output, &buf->list_node);
rpc->output_count++;
rpc->backlog += length;
for (;;) {
jsonrpc_run(rpc);
- if (list_is_empty(&rpc->output) || rpc->status) {
+ if (ovs_list_is_empty(&rpc->output) || rpc->status) {
return rpc->status;
}
jsonrpc_wait(rpc);
ovs_refcount_init(&lacp->ref_cnt);
lacp_lock();
- list_push_back(all_lacps, &lacp->node);
+ ovs_list_push_back(all_lacps, &lacp->node);
lacp_unlock();
return lacp;
}
}
hmap_destroy(&lacp->slaves);
- list_remove(&lacp->node);
+ ovs_list_remove(&lacp->node);
free(lacp->name);
free(lacp);
lacp_unlock();
lldp_tlv_end(p, start);
}
- if (!list_is_empty(&port->p_isid_vlan_maps)) {
+ if (!ovs_list_is_empty(&port->p_isid_vlan_maps)) {
memset(msg_auth_digest, 0, sizeof msg_auth_digest);
VLOG_DBG("receive LLDP PDU on %s", hardware->h_ifname);
chassis = xzalloc(sizeof *chassis);
- list_init(&chassis->c_mgmt);
+ ovs_list_init(&chassis->c_mgmt);
port = xzalloc(sizeof *port);
- list_init(&port->p_isid_vlan_maps);
+ ovs_list_init(&port->p_isid_vlan_maps);
length = s;
pos = (u_int8_t*) frame;
VLOG_WARN("unable to allocate memory for management address");
goto malformed;
}
- list_push_back(&chassis->c_mgmt, &mgmt->m_entries);
+ ovs_list_push_back(&chassis->c_mgmt, &mgmt->m_entries);
break;
case LLDP_TLV_ORG:
PEEK_BYTES(isid, 3);
isid_vlan_map->isid_vlan_data.isid =
(isid[0] << 16) | (isid[1] << 8) | isid[2];
- list_push_back(&port->p_isid_vlan_maps,
+ ovs_list_push_back(&port->p_isid_vlan_maps,
&isid_vlan_map->m_entries);
isid_vlan_map = NULL;
}
free(mgmt);
}
- list_init(&chassis->c_mgmt);
+ ovs_list_init(&chassis->c_mgmt);
}
void
}
if (!all) {
- list_remove(&port->p_entries);
+ ovs_list_remove(&port->p_entries);
}
lldpd_port_cleanup(port, true);
free(port);
}
}
if (all) {
- list_init(&hw->h_rports);
+ ovs_list_init(&hw->h_rports);
}
}
struct lldpd_aa_isid_vlan_maps_tlv *isid_vlan_map = NULL;
struct lldpd_aa_isid_vlan_maps_tlv *isid_vlan_map_next = NULL;
- if (!list_is_empty(&port->p_isid_vlan_maps)) {
+ if (!ovs_list_is_empty(&port->p_isid_vlan_maps)) {
LIST_FOR_EACH_SAFE (isid_vlan_map, isid_vlan_map_next, m_entries,
&port->p_isid_vlan_maps) {
- list_remove(&isid_vlan_map->m_entries);
+ ovs_list_remove(&isid_vlan_map->m_entries);
free(isid_vlan_map);
}
- list_init(&port->p_isid_vlan_maps);
+ ovs_list_init(&port->p_isid_vlan_maps);
}
}
hw->h_cfg = cfg;
ovs_strlcpy(hw->h_ifname, name, sizeof hw->h_ifname);
hw->h_ifindex = index;
- hw->h_lport.p_chassis = CONTAINER_OF(list_front(&cfg->g_chassis),
+ hw->h_lport.p_chassis = CONTAINER_OF(ovs_list_front(&cfg->g_chassis),
struct lldpd_chassis, list);
hw->h_lport.p_chassis->c_refcount++;
- list_init(&hw->h_rports);
+ ovs_list_init(&hw->h_rports);
return hw;
}
LIST_FOR_EACH_SAFE (hw, hw_next, h_entries, &cfg->g_hardware) {
if (!hw->h_flags) {
- list_remove(&hw->h_entries);
+ ovs_list_remove(&hw->h_entries);
lldpd_remote_cleanup(hw, NULL, true);
lldpd_hardware_cleanup(cfg, hw);
} else {
LIST_FOR_EACH_SAFE (chassis, chassis_next, list, &cfg->g_chassis) {
if (chassis->c_refcount == 0) {
- list_remove(&chassis->list);
+ ovs_list_remove(&chassis->list);
lldpd_chassis_cleanup(chassis, 1);
}
}
* marshaling.
*/
memcpy(ochassis, chassis, sizeof *ochassis);
- list_init(&ochassis->c_mgmt);
+ ovs_list_init(&ochassis->c_mgmt);
/* Copy of management addresses */
LIST_FOR_EACH_POP (mgmt, m_entries, &chassis->c_mgmt) {
- list_insert(&ochassis->c_mgmt, &mgmt->m_entries);
+ ovs_list_insert(&ochassis->c_mgmt, &mgmt->m_entries);
}
/* Restore saved values */
if (oport) {
/* The port is known, remove it before adding it back */
- list_remove(&oport->p_entries);
+ ovs_list_remove(&oport->p_entries);
lldpd_port_cleanup(oport, 1);
free(oport);
}
VLOG_DBG("unknown chassis, add it to the list");
chassis->c_index = ++cfg->g_lastrid;
chassis->c_refcount = 0;
- list_push_back(&cfg->g_chassis, &chassis->list);
- listsize = list_size(&cfg->g_chassis);
+ ovs_list_push_back(&cfg->g_chassis, &chassis->list);
+ listsize = ovs_list_size(&cfg->g_chassis);
VLOG_DBG("%"PRIuSIZE " different systems are known", listsize);
}
port->p_lastframe = xmalloc(s + sizeof(struct lldpd_frame));
port->p_lastframe->size = s;
memcpy(port->p_lastframe->frame, frame, s);
- list_insert(&hw->h_rports, &port->p_entries);
+ ovs_list_insert(&hw->h_rports, &port->p_entries);
port->p_chassis = chassis;
port->p_chassis->c_refcount++;
* freed with lldpd_port_cleanup() and therefore, the refcount
* of the chassis that was attached to it is decreased.
*/
- i = list_size(&hw->h_rports);
+ i = ovs_list_size(&hw->h_rports);
VLOG_DBG("%"PRIuSIZE " neighbors for %s", i, hw->h_ifname);
if (!oport) {
static inline struct lldpd_hardware *
lldpd_first_hardware(struct lldpd *lldpd)
{
- return CONTAINER_OF(list_front(&lldpd->g_hardware),
+ return CONTAINER_OF(ovs_list_front(&lldpd->g_hardware),
struct lldpd_hardware, h_entries);
}
if (e->mlport) {
struct mac_learning_port *mlport = e->mlport;
- list_remove(&e->port_lru_node);
+ ovs_list_remove(&e->port_lru_node);
- if (list_is_empty(&mlport->port_lrus)) {
+ if (ovs_list_is_empty(&mlport->port_lrus)) {
ovs_assert(mlport->heap_node.priority == 1);
hmap_remove(&ml->ports_by_ptr, &mlport->hmap_node);
heap_remove(&ml->ports_by_usage, &mlport->heap_node);
hash_pointer(port, ml->secret));
heap_insert(&ml->ports_by_usage, &mlport->heap_node, 1);
mlport->port = port;
- list_init(&mlport->port_lrus);
+ ovs_list_init(&mlport->port_lrus);
} else {
heap_change(&ml->ports_by_usage, &mlport->heap_node,
mlport->heap_node.priority + 1);
}
- list_push_back(&mlport->port_lrus, &e->port_lru_node);
+ ovs_list_push_back(&mlport->port_lrus, &e->port_lru_node);
e->mlport = mlport;
}
}
mlport = CONTAINER_OF(heap_max(&ml->ports_by_usage),
struct mac_learning_port, heap_node);
- e = CONTAINER_OF(list_front(&mlport->port_lrus),
+ e = CONTAINER_OF(ovs_list_front(&mlport->port_lrus),
struct mac_entry, port_lru_node);
mac_learning_expire(ml, e);
}
get_lru(struct mac_learning *ml, struct mac_entry **e)
OVS_REQ_RDLOCK(ml->rwlock)
{
- if (!list_is_empty(&ml->lrus)) {
+ if (!ovs_list_is_empty(&ml->lrus)) {
*e = mac_entry_from_lru_node(ml->lrus.next);
return true;
} else {
struct mac_learning *ml;
ml = xmalloc(sizeof *ml);
- list_init(&ml->lrus);
+ ovs_list_init(&ml->lrus);
hmap_init(&ml->table);
ml->secret = random_uint32();
ml->flood_vlans = NULL;
e->mlport = NULL;
COVERAGE_INC(mac_learning_learned);
} else {
- list_remove(&e->lru_node);
+ ovs_list_remove(&e->lru_node);
}
/* Mark 'e' as recently used. */
- list_push_back(&ml->lrus, &e->lru_node);
+ ovs_list_push_back(&ml->lrus, &e->lru_node);
if (e->mlport) {
- list_remove(&e->port_lru_node);
- list_push_back(&e->mlport->port_lrus, &e->port_lru_node);
+ ovs_list_remove(&e->port_lru_node);
+ ovs_list_push_back(&e->mlport->port_lrus, &e->port_lru_node);
}
e->expires = time_now() + ml->idle_time;
ml->need_revalidate = true;
mac_entry_set_port(ml, e, NULL);
hmap_remove(&ml->table, &e->hmap_node);
- list_remove(&e->lru_node);
+ ovs_list_remove(&e->lru_node);
free(e);
}
if (hmap_count(&ml->table) > ml->max_entries
|| ml->need_revalidate) {
poll_immediate_wake();
- } else if (!list_is_empty(&ml->lrus)) {
+ } else if (!ovs_list_is_empty(&ml->lrus)) {
struct mac_entry *e = mac_entry_from_lru_node(ml->lrus.next);
poll_timer_wait_until(e->expires * 1000LL);
}
group_get_lru(const struct mcast_snooping *ms, struct mcast_group **grp)
OVS_REQ_RDLOCK(ms->rwlock)
{
- if (!list_is_empty(&ms->group_lru)) {
+ if (!ovs_list_is_empty(&ms->group_lru)) {
*grp = mcast_group_from_lru_node(ms->group_lru.next);
return true;
} else {
ms = xmalloc(sizeof *ms);
hmap_init(&ms->table);
- list_init(&ms->group_lru);
- list_init(&ms->mrouter_lru);
- list_init(&ms->fport_list);
- list_init(&ms->rport_list);
+ ovs_list_init(&ms->group_lru);
+ ovs_list_init(&ms->mrouter_lru);
+ ovs_list_init(&ms->fport_list);
+ ovs_list_init(&ms->rport_list);
ms->secret = random_uint32();
ms->idle_time = MCAST_ENTRY_DEFAULT_IDLE_TIME;
ms->max_entries = MCAST_DEFAULT_MAX_ENTRIES;
b = mcast_group_bundle_lookup(ms, grp, port);
if (b) {
- list_remove(&b->bundle_node);
+ ovs_list_remove(&b->bundle_node);
} else {
b = xmalloc(sizeof *b);
- list_init(&b->bundle_node);
+ ovs_list_init(&b->bundle_node);
b->port = port;
}
b->expires = time_now() + idle_time;
- list_push_back(&grp->bundle_lru, &b->bundle_node);
+ ovs_list_push_back(&grp->bundle_lru, &b->bundle_node);
return b;
}
static bool
mcast_group_has_bundles(struct mcast_group *grp)
{
- return !list_is_empty(&grp->bundle_lru);
+ return !ovs_list_is_empty(&grp->bundle_lru);
}
/* Delete 'grp' from the 'ms' hash table.
mcast_snooping_flush_group__(struct mcast_snooping *ms,
struct mcast_group *grp)
{
- ovs_assert(list_is_empty(&grp->bundle_lru));
+ ovs_assert(ovs_list_is_empty(&grp->bundle_lru));
hmap_remove(&ms->table, &grp->hmap_node);
- list_remove(&grp->group_node);
+ ovs_list_remove(&grp->group_node);
free(grp);
}
LIST_FOR_EACH (b, bundle_node, &grp->bundle_lru) {
if (b->port == port) {
- list_remove(&b->bundle_node);
+ ovs_list_remove(&b->bundle_node);
free(b);
return true;
}
if (b->expires > timenow) {
break;
}
- list_remove(&b->bundle_node);
+ ovs_list_remove(&b->bundle_node);
free(b);
expired++;
}
hmap_insert(&ms->table, &grp->hmap_node, hash);
grp->addr = *addr;
grp->vlan = vlan;
- list_init(&grp->bundle_lru);
+ ovs_list_init(&grp->bundle_lru);
learned = true;
ms->need_revalidate = true;
COVERAGE_INC(mcast_snooping_learned);
} else {
- list_remove(&grp->group_node);
+ ovs_list_remove(&grp->group_node);
}
mcast_group_insert_bundle(ms, grp, port, ms->idle_time);
/* Mark 'grp' as recently used. */
- list_push_back(&ms->group_lru, &grp->group_node);
+ ovs_list_push_back(&ms->group_lru, &grp->group_node);
return learned;
}
struct mcast_mrouter_bundle **m)
OVS_REQ_RDLOCK(ms->rwlock)
{
- if (!list_is_empty(&ms->mrouter_lru)) {
+ if (!ovs_list_is_empty(&ms->mrouter_lru)) {
*m = mcast_mrouter_from_lru_node(ms->mrouter_lru.next);
return true;
} else {
mrouter = mcast_snooping_mrouter_lookup(ms, vlan, port);
if (mrouter) {
- list_remove(&mrouter->mrouter_node);
+ ovs_list_remove(&mrouter->mrouter_node);
} else {
mrouter = xmalloc(sizeof *mrouter);
mrouter->vlan = vlan;
}
mrouter->expires = time_now() + MCAST_MROUTER_PORT_IDLE_TIME;
- list_push_back(&ms->mrouter_lru, &mrouter->mrouter_node);
+ ovs_list_push_back(&ms->mrouter_lru, &mrouter->mrouter_node);
return ms->need_revalidate;
}
static void
mcast_snooping_flush_mrouter(struct mcast_mrouter_bundle *mrouter)
{
- list_remove(&mrouter->mrouter_node);
+ ovs_list_remove(&mrouter->mrouter_node);
free(mrouter);
}
\f
mcast_snooping_port_get(const struct ovs_list *list,
struct mcast_port_bundle **f)
{
- if (!list_is_empty(list)) {
+ if (!ovs_list_is_empty(list)) {
*f = mcast_port_from_list_node(list->next);
return true;
} else {
pbundle = xmalloc(sizeof *pbundle);
pbundle->port = port;
- list_insert(list, &pbundle->node);
+ ovs_list_insert(list, &pbundle->node);
}
static void
mcast_snooping_flush_port(struct mcast_port_bundle *pbundle)
{
- list_remove(&pbundle->node);
+ ovs_list_remove(&pbundle->node);
free(pbundle);
}
long long int mrouter_msec;
long long int msec = 0;
- if (!list_is_empty(&ms->group_lru)) {
+ if (!ovs_list_is_empty(&ms->group_lru)) {
grp = mcast_group_from_lru_node(ms->group_lru.next);
bundle = mcast_group_bundle_from_lru_node(grp->bundle_lru.next);
msec = bundle->expires * 1000LL;
}
- if (!list_is_empty(&ms->mrouter_lru)) {
+ if (!ovs_list_is_empty(&ms->mrouter_lru)) {
mrouter = mcast_mrouter_from_lru_node(ms->mrouter_lru.next);
mrouter_msec = mrouter->expires * 1000LL;
msec = msec ? MIN(msec, mrouter_msec) : mrouter_msec;
VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
}
- list_push_back(&dpdk_mp_list, &dmp->list_node);
+ ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
return dmp;
}
netdev_dpdk_alloc_txq(netdev, OVS_VHOST_MAX_QUEUE_NUM);
}
- list_push_back(&dpdk_list, &netdev->list_node);
+ ovs_list_push_back(&dpdk_list, &netdev->list_node);
unlock:
if (err) {
ovs_mutex_lock(&dpdk_mutex);
rte_free(dev->tx_q);
- list_remove(&dev->list_node);
+ ovs_list_remove(&dev->list_node);
dpdk_mp_put(dev->dpdk_mp);
ovs_mutex_unlock(&dpdk_mutex);
}
ovs_mutex_lock(&dpdk_mutex);
rte_free(dev->tx_q);
- list_remove(&dev->list_node);
+ ovs_list_remove(&dev->list_node);
dpdk_mp_put(dev->dpdk_mp);
ovs_mutex_unlock(&dpdk_mutex);
}
ivshmem->user_port_id = port_no;
ivshmem->eth_port_id = rte_eth_dev_count() - 1;
- list_push_back(&dpdk_ring_list, &ivshmem->list_node);
+ ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);
*eth_port_id = ivshmem->eth_port_id;
return 0;
struct netdev_rxq up;
struct ovs_list node; /* In netdev_dummy's "rxes" list. */
struct ovs_list recv_queue;
- int recv_queue_len; /* list_size(&recv_queue). */
+ int recv_queue_len; /* ovs_list_size(&recv_queue). */
struct seq *seq; /* Reports newly queued packets. */
};
int rxbuf_size = stream ? 2048 : 0;
s->stream = stream;
dp_packet_init(&s->rxbuf, rxbuf_size);
- list_init(&s->txq);
+ ovs_list_init(&s->txq);
}
static struct dummy_packet_stream *
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
stream_run_wait(s->stream);
- if (!list_is_empty(&s->txq)) {
+ if (!ovs_list_is_empty(&s->txq)) {
stream_send_wait(s->stream);
}
stream_recv_wait(s->stream);
static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
- if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
+ if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
struct dp_packet *b;
struct pkt_list_node *node;
node = xmalloc(sizeof *node);
node->pkt = b;
- list_push_back(&s->txq, &node->list_node);
+ ovs_list_push_back(&s->txq, &node->list_node);
}
}
stream_run(s->stream);
- if (!list_is_empty(&s->txq)) {
+ if (!ovs_list_is_empty(&s->txq)) {
struct pkt_list_node *txbuf_node;
struct dp_packet *txbuf;
int retval;
- ASSIGN_CONTAINER(txbuf_node, list_front(&s->txq), list_node);
+ ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
txbuf = txbuf_node->pkt;
retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
if (retval > 0) {
dp_packet_pull(txbuf, retval);
if (!dp_packet_size(txbuf)) {
- list_remove(&txbuf_node->list_node);
+ ovs_list_remove(&txbuf_node->list_node);
free(txbuf_node);
dp_packet_delete(txbuf);
}
dummy_packet_conn_init(&netdev->conn);
- list_init(&netdev->rxes);
+ ovs_list_init(&netdev->rxes);
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_lock(&dummy_list_mutex);
- list_push_back(&dummy_list, &netdev->list_node);
+ ovs_list_push_back(&dummy_list, &netdev->list_node);
ovs_mutex_unlock(&dummy_list_mutex);
return 0;
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
ovs_mutex_lock(&dummy_list_mutex);
- list_remove(&netdev->list_node);
+ ovs_list_remove(&netdev->list_node);
ovs_mutex_unlock(&dummy_list_mutex);
ovs_mutex_lock(&netdev->mutex);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
ovs_mutex_lock(&netdev->mutex);
- list_push_back(&netdev->rxes, &rx->node);
- list_init(&rx->recv_queue);
+ ovs_list_push_back(&netdev->rxes, &rx->node);
+ ovs_list_init(&rx->recv_queue);
rx->recv_queue_len = 0;
rx->seq = seq_create();
ovs_mutex_unlock(&netdev->mutex);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
ovs_mutex_lock(&netdev->mutex);
- list_remove(&rx->node);
+ ovs_list_remove(&rx->node);
pkt_list_delete(&rx->recv_queue);
ovs_mutex_unlock(&netdev->mutex);
seq_destroy(rx->seq);
struct dp_packet *packet;
ovs_mutex_lock(&netdev->mutex);
- if (!list_is_empty(&rx->recv_queue)) {
+ if (!ovs_list_is_empty(&rx->recv_queue)) {
struct pkt_list_node *pkt_node;
- ASSIGN_CONTAINER(pkt_node, list_pop_front(&rx->recv_queue), list_node);
+ ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
packet = pkt_node->pkt;
free(pkt_node);
rx->recv_queue_len--;
uint64_t seq = seq_read(rx->seq);
ovs_mutex_lock(&netdev->mutex);
- if (!list_is_empty(&rx->recv_queue)) {
+ if (!ovs_list_is_empty(&rx->recv_queue)) {
poll_immediate_wake();
} else {
seq_wait(rx->seq, seq);
struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
pkt_node->pkt = packet;
- list_push_back(&rx->recv_queue, &pkt_node->list_node);
+ ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
rx->recv_queue_len++;
seq_change(rx->seq);
}
netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;
netdev->requested_n_rxq = netdev->n_rxq;
- list_init(&netdev->saved_flags_list);
+ ovs_list_init(&netdev->saved_flags_list);
error = rc->class->construct(netdev);
if (!error) {
netdev_change_seq_changed(netdev);
} else {
free(netdev->name);
- ovs_assert(list_is_empty(&netdev->saved_flags_list));
+ ovs_assert(ovs_list_is_empty(&netdev->saved_flags_list));
shash_delete(&netdev_shash, netdev->node);
rc->class->dealloc(netdev);
}
ovs_mutex_lock(&netdev_mutex);
*sfp = sf = xmalloc(sizeof *sf);
sf->netdev = netdev;
- list_push_front(&netdev->saved_flags_list, &sf->node);
+ ovs_list_push_front(&netdev->saved_flags_list, &sf->node);
sf->saved_flags = changed_flags;
sf->saved_values = changed_flags & new_flags;
&old_flags);
ovs_mutex_lock(&netdev_mutex);
- list_remove(&sf->node);
+ ovs_list_remove(&sf->node);
free(sf);
netdev_unref(netdev);
}
nln->change = change;
nln->has_run = false;
- list_init(&nln->all_notifiers);
+ ovs_list_init(&nln->all_notifiers);
return nln;
}
nln_destroy(struct nln *nln)
{
if (nln) {
- ovs_assert(list_is_empty(&nln->all_notifiers));
+ ovs_assert(ovs_list_is_empty(&nln->all_notifiers));
nl_sock_destroy(nln->notify_sock);
free(nln);
}
}
notifier = xmalloc(sizeof *notifier);
- list_push_back(&nln->all_notifiers, ¬ifier->node);
+ ovs_list_push_back(&nln->all_notifiers, ¬ifier->node);
notifier->cb = cb;
notifier->aux = aux;
notifier->nln = nln;
if (notifier) {
struct nln *nln = notifier->nln;
- list_remove(¬ifier->node);
- if (list_is_empty(&nln->all_notifiers)) {
+ ovs_list_remove(¬ifier->node);
+ if (ovs_list_is_empty(&nln->all_notifiers)) {
nl_sock_destroy(nln->notify_sock);
nln->notify_sock = NULL;
}
hmap_init(&nxm_header_map);
hmap_init(&nxm_name_map);
for (int i = 0; i < MFF_N_IDS; i++) {
- list_init(&nxm_mf_map[i]);
+ ovs_list_init(&nxm_mf_map[i]);
}
for (struct nxm_field_index *nfi = all_nxm_fields;
nfi < &all_nxm_fields[ARRAY_SIZE(all_nxm_fields)]; nfi++) {
hash_uint64(nxm_no_len(nfi->nf.header)));
hmap_insert(&nxm_name_map, &nfi->name_node,
hash_string(nfi->nf.name, 0));
- list_push_back(&nxm_mf_map[nfi->nf.id], &nfi->mf_node);
+ ovs_list_push_back(&nxm_mf_map[nfi->nf.id], &nfi->mf_node);
}
ovsthread_once_done(&once);
}
{
struct ofpbuf *msg;
- list_init(replies);
+ ovs_list_init(replies);
msg = ofpraw_alloc_stats_reply(request, 1000);
- list_push_back(replies, &msg->list_node);
+ ovs_list_push_back(replies, &msg->list_node);
}
/* Prepares to append up to 'len' bytes to the series of statistics replies in
struct ofpbuf *
ofpmp_reserve(struct ovs_list *replies, size_t len)
{
- struct ofpbuf *msg = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *msg = ofpbuf_from_list(ovs_list_back(replies));
if (msg->size + len <= UINT16_MAX) {
ofpbuf_prealloc_tailroom(msg, len);
ofpbuf_put(next, msg->data, hdrs_len);
next->header = next->data;
next->msg = ofpbuf_tail(next);
- list_push_back(replies, &next->list_node);
+ ovs_list_push_back(replies, &next->list_node);
*ofpmp_flags__(msg->data) |= htons(OFPSF_REPLY_MORE);
void
ofpmp_postappend(struct ovs_list *replies, size_t start_ofs)
{
- struct ofpbuf *msg = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *msg = ofpbuf_from_list(ovs_list_back(replies));
ovs_assert(start_ofs <= UINT16_MAX);
if (msg->size > UINT16_MAX) {
enum ofp_version
ofpmp_version(struct ovs_list *replies)
{
- struct ofpbuf *msg = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *msg = ofpbuf_from_list(ovs_list_back(replies));
const struct ofp_header *oh = msg->data;
return oh->version;
enum ofpraw
ofpmp_decode_raw(struct ovs_list *replies)
{
- struct ofpbuf *msg = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *msg = ofpbuf_from_list(ovs_list_back(replies));
enum ofperr error;
enum ofpraw raw;
gm->command = command;
gm->group_id = OFPG_ANY;
gm->command_bucket_id = OFPG15_BUCKET_ALL;
- list_init(&gm->buckets);
+ ovs_list_init(&gm->buckets);
if (command == OFPGC11_DELETE && string[0] == '\0') {
gm->group_id = OFPG_ALL;
return NULL;
free(bucket);
goto out;
}
- list_push_back(&gm->buckets, &bucket->list_node);
+ ovs_list_push_back(&gm->buckets, &bucket->list_node);
if (gm->type != OFPGT11_SELECT && bucket->weight) {
error = xstrdup("Only select groups can have bucket weights.");
bkt_str = next_bkt_str;
}
- if (gm->type == OFPGT11_INDIRECT && !list_is_short(&gm->buckets)) {
+ if (gm->type == OFPGT11_INDIRECT && !ovs_list_is_short(&gm->buckets)) {
error = xstrdup("Indirect groups can have at most one bucket.");
goto out;
}
new_gms = x2nrealloc(*gms, &allocated_gms, sizeof **gms);
for (i = 0; i < *n_gms; i++) {
- list_moved(&new_gms[i].buckets, &(*gms)[i].buckets);
+ ovs_list_moved(&new_gms[i].buckets, &(*gms)[i].buckets);
}
*gms = new_gms;
}
*usable_protocols = OFPUTIL_P_NXM_OXM_ANY;
ttm->command = command;
- list_init(&ttm->mappings);
+ ovs_list_init(&ttm->mappings);
while (*s) {
struct ofputil_tlv_map *map = xmalloc(sizeof *map);
s++;
}
- list_push_back(&ttm->mappings, &map->list_node);
+ ovs_list_push_back(&ttm->mappings, &map->list_node);
if (!ovs_scan(s, "{class=%"SCNi16",type=%"SCNi8",len=%"SCNi8"}->tun_metadata%"SCNi16"%n",
&map->option_class, &map->option_type, &map->option_len,
ofputil_append_meter_config(struct ovs_list *replies,
const struct ofputil_meter_config *mc)
{
- struct ofpbuf *msg = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *msg = ofpbuf_from_list(ovs_list_back(replies));
size_t start_ofs = msg->size;
struct ofp13_meter_config *reply;
OVS_NOT_REACHED();
}
- list_init(replies);
- list_push_back(replies, &reply->list_node);
+ ovs_list_init(replies);
+ ovs_list_push_back(replies, &reply->list_node);
}
static void
struct ovs_list *replies)
{
enum ofp_version ofp_version = ofpmp_version(replies);
- struct ofpbuf *reply = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies));
size_t start_ofs = reply->size;
size_t len_ofs;
ovs_be16 *len;
ofputil_append_flow_stats_reply(const struct ofputil_flow_stats *fs,
struct ovs_list *replies)
{
- struct ofpbuf *reply = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies));
size_t start_ofs = reply->size;
enum ofp_version version = ofpmp_version(replies);
enum ofpraw raw = ofpmp_decode_raw(replies);
ofputil_append_port_desc_stats_reply(const struct ofputil_phy_port *pp,
struct ovs_list *replies)
{
- struct ofpbuf *reply = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies));
size_t start_ofs = reply->size;
ofputil_put_phy_port(ofpmp_version(replies), pp, reply);
ofputil_append_table_features_reply(const struct ofputil_table_features *tf,
struct ovs_list *replies)
{
- struct ofpbuf *reply = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies));
enum ofp_version version = ofpmp_version(replies);
size_t start_ofs = reply->size;
struct ofp13_table_features *otf;
struct ovs_list *replies,
enum ofp_version version)
{
- struct ofpbuf *reply = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies));
size_t start_otd;
struct ofp14_table_desc *otd;
msg = ofpraw_alloc_xid(OFPRAW_NXST_FLOW_MONITOR_REPLY, OFP10_VERSION,
htonl(0), 1024);
- list_init(replies);
- list_push_back(replies, &msg->list_node);
+ ovs_list_init(replies);
+ ovs_list_push_back(replies, &msg->list_node);
}
void
struct ofpbuf *msg;
size_t start_ofs;
- msg = ofpbuf_from_list(list_back(replies));
+ msg = ofpbuf_from_list(ovs_list_back(replies));
start_ofs = msg->size;
if (update->event == NXFME_ABBREV) {
}
new_bucket = ofputil_bucket_clone_data(bucket);
- list_push_back(dest, &new_bucket->list_node);
+ ovs_list_push_back(dest, &new_bucket->list_node);
}
}
{
static struct ofputil_bucket *bucket;
- ASSIGN_CONTAINER(bucket, list_front(buckets), list_node);
+ ASSIGN_CONTAINER(bucket, ovs_list_front(buckets), list_node);
return bucket;
}
{
static struct ofputil_bucket *bucket;
- ASSIGN_CONTAINER(bucket, list_back(buckets), list_node);
+ ASSIGN_CONTAINER(bucket, ovs_list_back(buckets), list_node);
return bucket;
}
struct ovs_list *replies,
enum ofp_version version)
{
- struct ofpbuf *reply = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies));
struct ofp11_group_desc_stats *ogds;
struct ofputil_bucket *bucket;
size_t start_ogds;
struct ovs_list *replies,
enum ofp_version version)
{
- struct ofpbuf *reply = ofpbuf_from_list(list_back(replies));
+ struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies));
struct ofp15_group_desc_stats *ogds;
struct ofputil_bucket *bucket;
size_t start_ogds, start_buckets;
struct ofp11_bucket *ob;
uint32_t bucket_id = 0;
- list_init(buckets);
+ ovs_list_init(buckets);
while (buckets_length > 0) {
struct ofputil_bucket *bucket;
struct ofpbuf ofpacts;
bucket->ofpacts = ofpbuf_steal_data(&ofpacts);
bucket->ofpacts_len = ofpacts.size;
- list_push_back(buckets, &bucket->list_node);
+ ovs_list_push_back(buckets, &bucket->list_node);
}
return 0;
{
struct ofp15_bucket *ob;
- list_init(buckets);
+ ovs_list_init(buckets);
while (buckets_length > 0) {
struct ofputil_bucket *bucket = NULL;
struct ofpbuf ofpacts;
bucket->ofpacts = ofpbuf_steal_data(&ofpacts);
bucket->ofpacts_len = ofpacts.size;
- list_push_back(buckets, &bucket->list_node);
+ ovs_list_push_back(buckets, &bucket->list_node);
continue;
if (!error
&& ofp_version >= OFP13_VERSION
&& gm->command == OFPGC11_DELETE
- && !list_is_empty(&gm->buckets)) {
+ && !ovs_list_is_empty(&gm->buckets)) {
error = OFPERR_OFPGMFC_INVALID_GROUP;
}
switch (gm->type) {
case OFPGT11_INDIRECT:
- if (!list_is_singleton(&gm->buckets)) {
+ if (!ovs_list_is_singleton(&gm->buckets)) {
return OFPERR_OFPGMFC_INVALID_GROUP;
}
break;
case OFPGC15_INSERT_BUCKET:
break;
case OFPGC15_REMOVE_BUCKET:
- if (!list_is_empty(&gm->buckets)) {
+ if (!ovs_list_is_empty(&gm->buckets)) {
return OFPERR_OFPGMFC_BAD_BUCKET;
}
break;
decode_tlv_table_mappings(struct ofpbuf *msg, unsigned int max_fields,
struct ovs_list *mappings)
{
- list_init(mappings);
+ ovs_list_init(mappings);
while (msg->size) {
struct nx_tlv_map *nx_map;
nx_map = ofpbuf_pull(msg, sizeof *nx_map);
map = xmalloc(sizeof *map);
- list_push_back(mappings, &map->list_node);
+ ovs_list_push_back(mappings, &map->list_node);
map->option_class = ntohs(nx_map->option_class);
map->option_type = nx_map->option_type;
b->source = source;
b->header = NULL;
b->msg = NULL;
- list_poison(&b->list_node);
+ ovs_list_poison(&b->list_node);
}
static void
*
* if (stp && ovs_refcount_unref_relaxed(&stp->ref_cnt) == 1) {
* ovs_mutex_lock(&mutex);
- * list_remove(&stp->node);
+ * ovs_list_remove(&stp->node);
* ovs_mutex_unlock(&mutex);
* free(stp->name);
* free(stp);
{
struct lldpd_aa_isid_vlan_maps_tlv *mapping;
- if (list_is_empty(&port->p_isid_vlan_maps)) {
+ if (ovs_list_is_empty(&port->p_isid_vlan_maps)) {
return;
}
lm->isid_vlan_data.isid = m->isid;
lm->isid_vlan_data.vlan = m->vlan;
- list_push_back(&hardware->h_lport.p_isid_vlan_maps, &lm->m_entries);
+ ovs_list_push_back(&hardware->h_lport.p_isid_vlan_maps, &lm->m_entries);
/* TODO Should be done in the Auto Attach state machine when a mapping goes
* from "pending" to "active".
node->vlan = m->vlan;
node->oper = BRIDGE_AA_VLAN_OPER_ADD;
- list_push_back(&lldp->active_mapping_queue, &node->list_node);
+ ovs_list_push_back(&lldp->active_mapping_queue, &node->list_node);
}
/* Bridge will poll the list of VLAN that needs to be auto configure based on
copy->vlan = node->vlan;
copy->oper = node->oper;
- list_push_back(list, &copy->list_node);
+ ovs_list_push_back(list, &copy->list_node);
/* Cleanup */
free(node->port_name);
ovs_mutex_lock(&mutex);
HMAP_FOR_EACH (lldp, hmap_node, all_lldps) {
- size += list_size(&lldp->active_mapping_queue);
+ size += ovs_list_size(&lldp->active_mapping_queue);
}
ovs_mutex_unlock(&mutex);
isid,
lm->isid_vlan_data.vlan);
- list_remove(&lm->m_entries);
+ ovs_list_remove(&lm->m_entries);
/* TODO Should be done in the AA SM when a mapping goes
* from "pending" to "active".
node->vlan = m->vlan;
node->oper = BRIDGE_AA_VLAN_OPER_REMOVE;
- list_push_back(&lldp->active_mapping_queue, &node->list_node);
+ ovs_list_push_back(&lldp->active_mapping_queue, &node->list_node);
break;
}
hmap_init(&lldp->mappings_by_isid);
hmap_init(&lldp->mappings_by_aux);
- list_init(&lldp->active_mapping_queue);
+ ovs_list_init(&lldp->active_mapping_queue);
lchassis = xzalloc(sizeof *lchassis);
lchassis->c_cap_available = LLDP_CAP_BRIDGE;
netdev_get_etheraddr(netdev, mac);
lchassis->c_id = &mac->ea[0];
- list_init(&lchassis->c_mgmt);
+ ovs_list_init(&lchassis->c_mgmt);
lchassis->c_ttl = lldp->lldpd->g_config.c_tx_interval *
lldp->lldpd->g_config.c_tx_hold;
lchassis->c_ttl = LLDP_CHASSIS_TTL;
lldpd_assign_cfg_to_protocols(lldp->lldpd);
- list_init(&lldp->lldpd->g_chassis);
- list_push_back(&lldp->lldpd->g_chassis, &lchassis->list);
+ ovs_list_init(&lldp->lldpd->g_chassis);
+ ovs_list_push_back(&lldp->lldpd->g_chassis, &lchassis->list);
if ((hw = lldpd_alloc_hardware(lldp->lldpd,
(char *) netdev_get_name(netdev),
hw->h_lport.p_element.system_id.rsvd2[0] = 0;
hw->h_lport.p_element.system_id.rsvd2[1] = 0;
- list_init(&hw->h_lport.p_isid_vlan_maps);
- list_init(&lldp->lldpd->g_hardware);
- list_push_back(&lldp->lldpd->g_hardware, &hw->h_entries);
+ ovs_list_init(&hw->h_lport.p_isid_vlan_maps);
+ ovs_list_init(&lldp->lldpd->g_hardware);
+ ovs_list_push_back(&lldp->lldpd->g_hardware, &hw->h_entries);
ovs_mutex_lock(&mutex);
hmap_init(&lldp->mappings_by_isid);
hmap_init(&lldp->mappings_by_aux);
- list_init(&lldp->active_mapping_queue);
+ ovs_list_init(&lldp->active_mapping_queue);
lchassis = xzalloc(sizeof *lchassis);
lchassis->c_cap_available = LLDP_CAP_BRIDGE;
lchassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR;
lchassis->c_id_len = ETH_ADDR_LEN;
- list_init(&lchassis->c_mgmt);
+ ovs_list_init(&lchassis->c_mgmt);
lchassis->c_ttl = LLDP_CHASSIS_TTL;
lldpd_assign_cfg_to_protocols(lldp->lldpd);
- list_init(&lldp->lldpd->g_chassis);
- list_push_back(&lldp->lldpd->g_chassis, &lchassis->list);
+ ovs_list_init(&lldp->lldpd->g_chassis);
+ ovs_list_push_back(&lldp->lldpd->g_chassis, &lchassis->list);
hw = lldpd_alloc_hardware(lldp->lldpd, "dummy-hw", 0);
hw->h_lport.p_element.system_id.rsvd2[0] = 0;
hw->h_lport.p_element.system_id.rsvd2[1] = 0;
- list_init(&hw->h_lport.p_isid_vlan_maps);
- list_init(&lldp->lldpd->g_hardware);
- list_push_back(&lldp->lldpd->g_hardware, &hw->h_entries);
+ ovs_list_init(&hw->h_lport.p_isid_vlan_maps);
+ ovs_list_init(&lldp->lldpd->g_hardware);
+ ovs_list_push_back(&lldp->lldpd->g_hardware, &hw->h_entries);
return lldp;
}
cfg = lldp->lldpd;
LIST_FOR_EACH_SAFE (hw, hw_next, h_entries, &cfg->g_hardware) {
- list_remove(&hw->h_entries);
+ ovs_list_remove(&hw->h_entries);
free(hw->h_lport.p_lastframe);
free(hw);
}
LIST_FOR_EACH_SAFE (chassis, chassis_next, list, &cfg->g_chassis) {
- list_remove(&chassis->list);
+ ovs_list_remove(&chassis->list);
free(chassis);
}
struct dirent *subdir;
hmap_insert(&all_numa_nodes, &n->hmap_node, hash_int(i, 0));
- list_init(&n->cores);
+ ovs_list_init(&n->cores);
n->numa_id = i;
while ((subdir = readdir(dir)) != NULL) {
core_id = strtoul(subdir->d_name + 3, NULL, 10);
hmap_insert(&all_cpu_cores, &c->hmap_node,
hash_int(core_id, 0));
- list_insert(&n->cores, &c->list_node);
+ ovs_list_insert(&n->cores, &c->list_node);
c->core_id = core_id;
c->numa = n;
c->available = true;
}
}
VLOG_INFO("Discovered %"PRIuSIZE" CPU cores on NUMA node %d",
- list_size(&n->cores), n->numa_id);
+ ovs_list_size(&n->cores), n->numa_id);
closedir(dir);
} else if (errno != ENOENT) {
VLOG_WARN("opendir(%s) failed (%s)", path,
struct numa_node *numa = get_numa_by_numa_id(numa_id);
if (numa) {
- return list_size(&numa->cores);
+ return ovs_list_size(&numa->cores);
}
return OVS_CORE_UNSPEC;
struct cpu_core *core;
dump = xmalloc(sizeof *dump);
- list_init(&dump->dump);
+ ovs_list_init(&dump->dump);
LIST_FOR_EACH(core, list_node, &numa->cores) {
struct ovs_numa_info *info = xmalloc(sizeof *info);
info->numa_id = numa->numa_id;
info->core_id = core->core_id;
- list_insert(&dump->dump, &info->list_node);
+ ovs_list_insert(&dump->dump, &info->list_node);
}
}
sizeof perthread->name);
ovs_mutex_lock(&ovsrcu_threads_mutex);
- list_push_back(&ovsrcu_threads, &perthread->list_node);
+ ovs_list_push_back(&ovsrcu_threads, &perthread->list_node);
ovs_mutex_unlock(&ovsrcu_threads_mutex);
pthread_setspecific(perthread_key, perthread);
struct ovs_list cbsets;
guarded_list_pop_all(&flushed_cbsets, &cbsets);
- if (list_is_empty(&cbsets)) {
+ if (ovs_list_is_empty(&cbsets)) {
return false;
}
}
ovs_mutex_lock(&ovsrcu_threads_mutex);
- list_remove(&perthread->list_node);
+ ovs_list_remove(&perthread->list_node);
ovs_mutex_unlock(&ovsrcu_threads_mutex);
ovs_mutex_destroy(&perthread->mutex);
global_seqno = seq_create();
xpthread_key_create(&perthread_key, ovsrcu_thread_exit_cb);
fatal_signal_add_hook(ovsrcu_cancel_thread_exit_cb, NULL, NULL, true);
- list_init(&ovsrcu_threads);
+ ovs_list_init(&ovsrcu_threads);
ovs_mutex_init(&ovsrcu_threads_mutex);
guarded_list_init(&flushed_cbsets);
int i;
ovs_mutex_lock(&key_mutex);
- list_remove(&slots->list_node);
+ ovs_list_remove(&slots->list_node);
LIST_FOR_EACH (key, list_node, &inuse_keys) {
void *value = clear_slot(slots, key->index);
if (value && key->destructor) {
}
ovs_mutex_lock(&key_mutex);
- if (list_is_empty(&free_keys)) {
+ if (ovs_list_is_empty(&free_keys)) {
key = xmalloc(sizeof *key);
key->index = n_keys++;
if (key->index >= MAX_KEYS) {
abort();
}
} else {
- key = CONTAINER_OF(list_pop_back(&free_keys),
+ key = CONTAINER_OF(ovs_list_pop_back(&free_keys),
struct ovsthread_key, list_node);
}
- list_push_back(&inuse_keys, &key->list_node);
+ ovs_list_push_back(&inuse_keys, &key->list_node);
key->destructor = destructor;
ovs_mutex_unlock(&key_mutex);
ovs_mutex_lock(&key_mutex);
/* Move 'key' from 'inuse_keys' to 'free_keys'. */
- list_remove(&key->list_node);
- list_push_back(&free_keys, &key->list_node);
+ ovs_list_remove(&key->list_node);
+ ovs_list_push_back(&free_keys, &key->list_node);
/* Clear this slot in all threads. */
LIST_FOR_EACH (slots, list_node, &slots_list) {
ovs_mutex_lock(&key_mutex);
pthread_setspecific(tsd_key, slots);
- list_push_back(&slots_list, &slots->list_node);
+ ovs_list_push_back(&slots_list, &slots->list_node);
ovs_mutex_unlock(&key_mutex);
}
shash_add_assert(&table->columns, column->name, column);
}
hmap_init(&table->rows);
- list_init(&table->track_list);
+ ovs_list_init(&table->track_list);
table->change_seqno[OVSDB_IDL_CHANGE_INSERT]
= table->change_seqno[OVSDB_IDL_CHANGE_MODIFY]
= table->change_seqno[OVSDB_IDL_CHANGE_DELETE] = 0;
/* No need to do anything with dst_arcs: some node has those arcs
* as forward arcs and will destroy them itself. */
- if (!list_is_empty(&row->track_node)) {
- list_remove(&row->track_node);
+ if (!ovs_list_is_empty(&row->track_node)) {
+ ovs_list_remove(&row->track_node);
}
ovsdb_idl_row_destroy(row);
struct ovsdb_idl_table *table
= ovsdb_idl_table_from_class(idl, table_class);
- if (!list_is_empty(&table->track_list)) {
- return CONTAINER_OF(list_front(&table->track_list), struct ovsdb_idl_row, track_node);
+ if (!ovs_list_is_empty(&table->track_list)) {
+ return CONTAINER_OF(ovs_list_front(&table->track_list), struct ovsdb_idl_row, track_node);
}
return NULL;
}
for (i = 0; i < idl->class->n_tables; i++) {
struct ovsdb_idl_table *table = &idl->tables[i];
- if (!list_is_empty(&table->track_list)) {
+ if (!ovs_list_is_empty(&table->track_list)) {
struct ovsdb_idl_row *row, *next;
LIST_FOR_EACH_SAFE(row, next, track_node, &table->track_list) {
free(row->updated);
row->updated = NULL;
}
- list_remove(&row->track_node);
- list_init(&row->track_node);
+ ovs_list_remove(&row->track_node);
+ ovs_list_init(&row->track_node);
if (ovsdb_idl_row_is_orphan(row)) {
ovsdb_idl_row_clear_old(row);
free(row);
= row->table->change_seqno[change]
= row->table->idl->change_seqno + 1;
if (table->modes[column_idx] & OVSDB_IDL_TRACK) {
- if (!list_is_empty(&row->track_node)) {
- list_remove(&row->track_node);
+ if (!ovs_list_is_empty(&row->track_node)) {
+ ovs_list_remove(&row->track_node);
}
- list_push_back(&row->table->track_list,
+ ovs_list_push_back(&row->table->track_list,
&row->track_node);
if (!row->updated) {
row->updated = bitmap_allocate(class->n_columns);
* freed.
*/
LIST_FOR_EACH_SAFE (arc, next, src_node, &row->src_arcs) {
- list_remove(&arc->dst_node);
+ ovs_list_remove(&arc->dst_node);
if (destroy_dsts
&& ovsdb_idl_row_is_orphan(arc->dst)
- && list_is_empty(&arc->dst->dst_arcs)) {
+ && ovs_list_is_empty(&arc->dst->dst_arcs)) {
ovsdb_idl_row_destroy(arc->dst);
}
free(arc);
}
- list_init(&row->src_arcs);
+ ovs_list_init(&row->src_arcs);
}
/* Force nodes that reference 'row' to reparse. */
{
struct ovsdb_idl_row *row = xzalloc(class->allocation_size);
class->row_init(row);
- list_init(&row->src_arcs);
- list_init(&row->dst_arcs);
+ ovs_list_init(&row->src_arcs);
+ ovs_list_init(&row->dst_arcs);
hmap_node_nullify(&row->txn_node);
- list_init(&row->track_node);
+ ovs_list_init(&row->track_node);
return row;
}
= row->table->change_seqno[OVSDB_IDL_CHANGE_DELETE]
= row->table->idl->change_seqno + 1;
}
- if (list_is_empty(&row->track_node)) {
- list_push_back(&row->table->track_list, &row->track_node);
+ if (ovs_list_is_empty(&row->track_node)) {
+ ovs_list_push_back(&row->table->track_list, &row->track_node);
}
}
}
for (i = 0; i < idl->class->n_tables; i++) {
struct ovsdb_idl_table *table = &idl->tables[i];
- if (!list_is_empty(&table->track_list)) {
+ if (!ovs_list_is_empty(&table->track_list)) {
struct ovsdb_idl_row *row, *next;
LIST_FOR_EACH_SAFE(row, next, track_node, &table->track_list) {
if (!ovsdb_idl_track_is_set(row->table)) {
- list_remove(&row->track_node);
+ ovs_list_remove(&row->track_node);
free(row);
}
}
ovsdb_idl_row_unparse(row);
ovsdb_idl_row_clear_arcs(row, true);
ovsdb_idl_row_clear_old(row);
- if (list_is_empty(&row->dst_arcs)) {
+ if (ovs_list_is_empty(&row->dst_arcs)) {
ovsdb_idl_row_destroy(row);
} else {
ovsdb_idl_row_reparse_backrefs(row);
* at 'src', since we add all of the arcs from a given source in a clump
* (in a single call to ovsdb_idl_row_parse()) and new arcs are always
* added at the front of the dst_arcs list. */
- if (list_is_empty(&dst->dst_arcs)) {
+ if (ovs_list_is_empty(&dst->dst_arcs)) {
return true;
}
arc = CONTAINER_OF(dst->dst_arcs.next, struct ovsdb_idl_arc, dst_node);
/* The arc *must* be added at the front of the dst_arcs list. See
* ovsdb_idl_row_reparse_backrefs() for details. */
arc = xmalloc(sizeof *arc);
- list_push_front(&src->src_arcs, &arc->src_node);
- list_push_front(&dst->dst_arcs, &arc->dst_node);
+ ovs_list_push_front(&src->src_arcs, &arc->src_node);
+ ovs_list_push_front(&dst->dst_arcs, &arc->dst_node);
arc->src = src;
arc->dst = dst;
}
p->name = xstrdup(slash ? slash + 1 : name);
p->exited = false;
- list_push_back(&all_processes, &p->node);
+ ovs_list_push_back(&all_processes, &p->node);
return p;
}
process_destroy(struct process *p)
{
if (p) {
- list_remove(&p->node);
+ ovs_list_remove(&p->node);
free(p->name);
free(p);
}
#ifndef _WIN32
char buf[_POSIX_PIPE_BUF];
- if (!list_is_empty(&all_processes) && read(fds[0], buf, sizeof buf) > 0) {
+ if (!ovs_list_is_empty(&all_processes) && read(fds[0], buf, sizeof buf) > 0) {
struct process *p;
LIST_FOR_EACH (p, node, &all_processes) {
rc->target = xstrdup("void");
rc->reliable = false;
- list_init(&rc->txq);
+ ovs_list_init(&rc->txq);
rc->backoff = 0;
rc->max_backoff = max_backoff ? max_backoff : 8;
do_tx_work(struct rconn *rc)
OVS_REQUIRES(rc->mutex)
{
- if (list_is_empty(&rc->txq)) {
+ if (ovs_list_is_empty(&rc->txq)) {
return;
}
- while (!list_is_empty(&rc->txq)) {
+ while (!ovs_list_is_empty(&rc->txq)) {
int error = try_send(rc);
if (error) {
break;
}
rc->last_activity = time_now();
}
- if (list_is_empty(&rc->txq)) {
+ if (ovs_list_is_empty(&rc->txq)) {
poll_immediate_wake();
}
}
ovs_mutex_lock(&rc->mutex);
if (rc->vconn) {
vconn_run_wait(rc->vconn);
- if ((rc->state & (S_ACTIVE | S_IDLE)) && !list_is_empty(&rc->txq)) {
+ if ((rc->state & (S_ACTIVE | S_IDLE)) && !ovs_list_is_empty(&rc->txq)) {
vconn_wait(rc->vconn, WAIT_SEND);
}
}
/* Reuse 'frame' as a private pointer while 'b' is in txq. */
b->header = counter;
- list_push_back(&rc->txq, &b->list_node);
+ ovs_list_push_back(&rc->txq, &b->list_node);
/* If the queue was empty before we added 'b', try to send some
* packets. (But if the queue had packets in it, it's because the
unsigned int len;
ovs_mutex_lock(&rc->mutex);
- len = list_size(&rc->txq);
+ len = ovs_list_size(&rc->txq);
ovs_mutex_unlock(&rc->mutex);
return len;
/* Eagerly remove 'msg' from the txq. We can't remove it from the list
* after sending, if sending is successful, because it is then owned by the
* vconn, which might have freed it already. */
- list_remove(&msg->list_node);
+ ovs_list_remove(&msg->list_node);
msg->header = NULL;
retval = vconn_send(rc->vconn, msg);
if (retval) {
msg->header = counter;
- list_push_front(&rc->txq, &msg->list_node);
+ ovs_list_push_front(&rc->txq, &msg->list_node);
if (retval != EAGAIN) {
report_error(rc, retval);
disconnect(rc, retval);
flush_queue(struct rconn *rc)
OVS_REQUIRES(rc->mutex)
{
- if (list_is_empty(&rc->txq)) {
+ if (ovs_list_is_empty(&rc->txq)) {
return;
}
- while (!list_is_empty(&rc->txq)) {
- struct ofpbuf *b = ofpbuf_from_list(list_pop_front(&rc->txq));
+ while (!ovs_list_is_empty(&rc->txq)) {
+ struct ofpbuf *b = ofpbuf_from_list(ovs_list_pop_front(&rc->txq));
struct rconn_packet_counter *counter = b->header;
if (counter) {
rconn_packet_counter_dec(counter, b->size);
* means that there should be not ports at this time. */
ovs_assert(hmap_is_empty(&rstp->ports));
- list_remove(&rstp->node);
+ ovs_list_remove(&rstp->node);
ovs_mutex_unlock(&rstp_mutex);
hmap_destroy(&rstp->ports);
free(rstp->name);
ovs_refcount_init(&rstp->ref_cnt);
- list_push_back(all_rstps, &rstp->node);
+ ovs_list_push_back(all_rstps, &rstp->node);
ovs_mutex_unlock(&rstp_mutex);
VLOG_DBG("RSTP instance creation done");
}
}
- list_push_back(&all_notifiers, &notifier->node);
+ ovs_list_push_back(&all_notifiers, &notifier->node);
notifier->cb = cb;
notifier->aux = aux;
OVS_EXCLUDED(rtbsd_mutex)
{
ovs_mutex_lock(&rtbsd_mutex);
- list_remove(&notifier->node);
- if (list_is_empty(&all_notifiers)) {
+ ovs_list_remove(&notifier->node);
+ if (ovs_list_is_empty(&all_notifiers)) {
close(notify_sock);
notify_sock = -1;
}
waiter->ovsthread_id = id;
waiter->value = value;
waiter->thread = seq_thread_get();
- list_push_back(&waiter->thread->waiters, &waiter->list_node);
+ ovs_list_push_back(&waiter->thread->waiters, &waiter->list_node);
if (!waiter->thread->waiting) {
latch_wait_at(&waiter->thread->latch, where);
struct seq_thread *thread = pthread_getspecific(seq_thread_key);
if (!thread) {
thread = xmalloc(sizeof *thread);
- list_init(&thread->waiters);
+ ovs_list_init(&thread->waiters);
latch_init(&thread->latch);
thread->waiting = false;
OVS_REQUIRES(seq_mutex)
{
hmap_remove(&waiter->seq->waiters, &waiter->hmap_node);
- list_remove(&waiter->list_node);
+ ovs_list_remove(&waiter->list_node);
free(waiter);
}
* To add an element to the queue:
*
* ovs_mutex_lock(&mutex);
- * list_push_back(&queue, ...element...);
- * if (list_is_singleton(&queue)) { // The 'if' test here is optional.
+ * ovs_list_push_back(&queue, ...element...);
+ * if (ovs_list_is_singleton(&queue)) { // The 'if' test here is optional.
* seq_change(&nonempty_seq);
* }
* ovs_mutex_unlock(&mutex);
* To wait for the queue to become nonempty:
*
* ovs_mutex_lock(&mutex);
- * if (list_is_empty(&queue)) {
+ * if (ovs_list_is_empty(&queue)) {
* seq_wait(&nonempty_seq, seq_read(&nonempty_seq));
* } else {
* poll_immediate_wake();
}
ovs_refcount_init(&stp->ref_cnt);
- list_push_back(all_stps, &stp->node);
+ ovs_list_push_back(all_stps, &stp->node);
ovs_mutex_unlock(&mutex);
return stp;
}
size_t i;
ovs_mutex_lock(&mutex);
- list_remove(&stp->node);
+ ovs_list_remove(&stp->node);
ovs_mutex_unlock(&mutex);
free(stp->name);
p->port = port;
p->udp_port = udp_port;
ovs_strlcpy(p->dev_name, dev_name, sizeof p->dev_name);
- list_insert(&port_list, &p->node);
+ ovs_list_insert(&port_list, &p->node);
LIST_FOR_EACH(ip_dev, node, &addr_list) {
map_insert_ipdev__(ip_dev, p->dev_name, p->port, p->udp_port);
ovs_mutex_lock(&mutex);
LIST_FOR_EACH_SAFE(p, next, node, &port_list) {
if (p->udp_port == udp_port) {
- list_remove(&p->node);
+ ovs_list_remove(&p->node);
found = true;
break;
}
ip_dev->addr = addr;
ip_dev->n_addr = n_addr;
ovs_strlcpy(ip_dev->dev_name, netdev_get_name(dev), sizeof ip_dev->dev_name);
- list_insert(&addr_list, &ip_dev->node);
+ ovs_list_insert(&addr_list, &ip_dev->node);
map_insert_ipdev(ip_dev);
return;
ipdev_map_delete(ip_dev, p->udp_port);
}
- list_remove(&ip_dev->node);
+ ovs_list_remove(&ip_dev->node);
netdev_close(ip_dev->dev);
free(ip_dev->addr);
free(ip_dev);
tnl_port_map_init(void)
{
classifier_init(&cls, flow_segment_u64s);
- list_init(&addr_list);
- list_init(&port_list);
+ ovs_list_init(&addr_list);
+ ovs_list_init(&port_list);
unixctl_command_register("tnl/ports/show", "-v", 0, 1, tnl_port_show, NULL);
}
ttr->max_option_space = TUN_METADATA_TOT_OPT_SIZE;
ttr->max_fields = TUN_METADATA_NUM_OPTS;
- list_init(&ttr->mappings);
+ ovs_list_init(&ttr->mappings);
for (i = 0; i < TUN_METADATA_NUM_OPTS; i++) {
struct tun_meta_entry *entry = &map->entries[i];
map->option_len = entry->loc.len;
map->index = i;
- list_push_back(&ttr->mappings, &map->list_node);
+ ovs_list_push_back(&ttr->mappings, &map->list_node);
}
}
server = xmalloc(sizeof *server);
server->listener = listener;
- list_init(&server->conns);
+ ovs_list_init(&server->conns);
*serverp = server;
exit:
static void
kill_connection(struct unixctl_conn *conn)
{
- list_remove(&conn->node);
+ ovs_list_remove(&conn->node);
jsonrpc_close(conn->rpc);
json_destroy(conn->request_id);
free(conn);
error = pstream_accept(server->listener, &stream);
if (!error) {
struct unixctl_conn *conn = xzalloc(sizeof *conn);
- list_push_back(&server->conns, &conn->node);
+ ovs_list_push_back(&server->conns, &conn->node);
conn->rpc = jsonrpc_open(stream);
} else if (error == EAGAIN) {
break;
vlog_insert_module(struct ovs_list *vlog)
{
ovs_mutex_lock(&log_file_mutex);
- list_insert(&vlog_modules, vlog);
+ ovs_list_insert(&vlog_modules, vlog);
ovs_mutex_unlock(&log_file_mutex);
}
bond = xzalloc(sizeof *bond);
bond->ofproto = ofproto;
hmap_init(&bond->slaves);
- list_init(&bond->enabled_slaves);
+ ovs_list_init(&bond->enabled_slaves);
ovs_mutex_init(&bond->mutex);
ovs_refcount_init(&bond->ref_cnt);
hmap_init(&bond->pr_rule_ops);
if (!slave->enabled) {
ds_put_cstr(&ds, " (disabled)");
}
- if (!list_is_empty(&slave->entries)) {
+ if (!ovs_list_is_empty(&slave->entries)) {
struct bond_entry *e;
ds_put_cstr(&ds, " (");
LIST_FOR_EACH (e, list_node, &slave->entries) {
- if (&e->list_node != list_front(&slave->entries)) {
+ if (&e->list_node != ovs_list_front(&slave->entries)) {
ds_put_cstr(&ds, " + ");
}
ds_put_format(&ds, "h%"PRIdPTR": %"PRIu64"kB",
{
struct bond_entry *e;
- if (list_is_short(&from->entries)) {
+ if (ovs_list_is_short(&from->entries)) {
/* 'from' carries no more than one MAC hash, so shifting load away from
* it would be pointless. */
return NULL;
break;
}
}
- list_insert(&pos->bal_node, &slave->bal_node);
+ ovs_list_insert(&pos->bal_node, &slave->bal_node);
}
/* Removes 'slave' from its current list and then inserts it into 'bals' so
static void
reinsert_bal(struct ovs_list *bals, struct bond_slave *slave)
{
- list_remove(&slave->bal_node);
+ ovs_list_remove(&slave->bal_node);
insert_bal(bals, slave);
}
* Compute each slave's tx_bytes as the sum of its entries' tx_bytes. */
HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
slave->tx_bytes = 0;
- list_init(&slave->entries);
+ ovs_list_init(&slave->entries);
}
for (e = &bond->hash[0]; e <= &bond->hash[BOND_MASK]; e++) {
if (e->slave && e->tx_bytes) {
e->slave->tx_bytes += e->tx_bytes;
- list_push_back(&e->slave->entries, &e->list_node);
+ ovs_list_push_back(&e->slave->entries, &e->list_node);
}
}
*
* XXX This is O(n**2) in the number of slaves but it could be O(n lg n)
* with a proper list sort algorithm. */
- list_init(&bals);
+ ovs_list_init(&bals);
HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
if (slave->enabled) {
insert_bal(&bals, slave);
log_bals(bond, &bals);
/* Shift load from the most-loaded slaves to the least-loaded slaves. */
- while (!list_is_short(&bals)) {
- struct bond_slave *from = bond_slave_from_bal_node(list_front(&bals));
- struct bond_slave *to = bond_slave_from_bal_node(list_back(&bals));
+ while (!ovs_list_is_short(&bals)) {
+ struct bond_slave *from = bond_slave_from_bal_node(ovs_list_front(&bals));
+ struct bond_slave *to = bond_slave_from_bal_node(ovs_list_back(&bals));
uint64_t overload;
overload = from->tx_bytes - to->tx_bytes;
* We don't add the element to to->hashes. That would only allow
* 'e' to be migrated to another slave in this rebalancing run, and
* there is no point in doing that. */
- list_remove(&e->list_node);
+ ovs_list_remove(&e->list_node);
/* Re-sort 'bals'. */
reinsert_bal(&bals, from);
} else {
/* Can't usefully migrate anything away from 'from'.
* Don't reconsider it. */
- list_remove(&from->bal_node);
+ ovs_list_remove(&from->bal_node);
}
}
ovs_mutex_lock(&slave->bond->mutex);
if (enable) {
- list_insert(&slave->bond->enabled_slaves, &slave->list_node);
+ ovs_list_insert(&slave->bond->enabled_slaves, &slave->list_node);
} else {
- list_remove(&slave->list_node);
+ ovs_list_remove(&slave->list_node);
}
ovs_mutex_unlock(&slave->bond->mutex);
struct ovs_list *node;
ovs_mutex_lock(&bond->mutex);
- if (list_is_empty(&bond->enabled_slaves)) {
+ if (ovs_list_is_empty(&bond->enabled_slaves)) {
ovs_mutex_unlock(&bond->mutex);
return NULL;
}
- node = list_pop_front(&bond->enabled_slaves);
- list_push_back(&bond->enabled_slaves, node);
+ node = ovs_list_pop_front(&bond->enabled_slaves);
+ ovs_list_push_back(&bond->enabled_slaves, node);
ovs_mutex_unlock(&bond->mutex);
return CONTAINER_OF(node, struct bond_slave, list_node);
bundle->flags = flags;
bundle->state = BS_OPEN;
- list_init(&bundle->msg_list);
+ ovs_list_init(&bundle->msg_list);
return bundle;
}
return OFPERR_OFPBFC_BAD_FLAGS;
}
- list_push_back(&bundle->msg_list, &bmsg->node);
+ ovs_list_push_back(&bundle->msg_list, &bmsg->node);
return 0;
}
mgr->local_port_name = xstrdup(local_port_name);
hmap_init(&mgr->controllers);
- list_init(&mgr->all_conns);
+ ovs_list_init(&mgr->all_conns);
mgr->master_election_id = 0;
mgr->master_election_id_defined = false;
ofconn = xzalloc(sizeof *ofconn);
ofconn->connmgr = mgr;
- list_push_back(&mgr->all_conns, &ofconn->node);
+ ovs_list_push_back(&mgr->all_conns, &ofconn->node);
ofconn->rconn = rconn;
ofconn->type = type;
ofconn->enable_async_msgs = enable_async_msgs;
hmap_init(&ofconn->monitors);
- list_init(&ofconn->updates);
+ ovs_list_init(&ofconn->updates);
hmap_init(&ofconn->bundles);
hmap_destroy(&ofconn->bundles);
hmap_destroy(&ofconn->monitors);
- list_remove(&ofconn->node);
+ ovs_list_remove(&ofconn->node);
rconn_destroy(ofconn->rconn);
rconn_packet_counter_destroy(ofconn->packet_in_counter);
rconn_packet_counter_destroy(ofconn->reply_counter);
}
if (flags) {
- if (list_is_empty(&ofconn->updates)) {
+ if (ovs_list_is_empty(&ofconn->updates)) {
ofputil_start_flow_update(&ofconn->updates);
ofconn->sent_abbrev_update = false;
}
ofmonitor_collect_resume_rules(m, ofconn->monitor_paused, &rules);
}
- list_init(&msgs);
+ ovs_list_init(&msgs);
ofmonitor_compose_refresh_updates(&rules, &msgs);
resumed = ofpraw_alloc_xid(OFPRAW_NXT_FLOW_MONITOR_RESUMED, OFP10_VERSION,
htonl(0), 0);
- list_push_back(&msgs, &resumed->list_node);
+ ovs_list_push_back(&msgs, &resumed->list_node);
ofconn_send_replies(ofconn, &msgs);
ofconn->monitor_paused = 0;
exporter->seq_number = 1;
exporter->last_template_set_time = TIME_MIN;
hmap_init(&exporter->cache_flow_key_map);
- list_init(&exporter->cache_flow_start_timestamp_list);
+ ovs_list_init(&exporter->cache_flow_start_timestamp_list);
exporter->cache_active_timeout = 0;
exporter->cache_max_flows = 0;
}
/* As the latest entry added into the cache, it should
* logically have the highest flow_start_timestamp_usec, so
* append it at the tail. */
- list_push_back(&exporter->cache_flow_start_timestamp_list,
+ ovs_list_push_back(&exporter->cache_flow_start_timestamp_list,
&entry->cache_flow_start_timestamp_list_node);
/* Enforce exporter->cache_max_flows limit. */
bool template_msg_sent = false;
enum ipfix_flow_end_reason flow_end_reason;
- if (list_is_empty(&exporter->cache_flow_start_timestamp_list)) {
+ if (ovs_list_is_empty(&exporter->cache_flow_start_timestamp_list)) {
return;
}
break;
}
- list_remove(&entry->cache_flow_start_timestamp_list_node);
+ ovs_list_remove(&entry->cache_flow_start_timestamp_list_node);
hmap_remove(&exporter->cache_flow_key_map,
&entry->flow_key_map_node);
next_id = 1; /* 0 is not a valid ID. */
cmap_init(&id_map);
cmap_init(&metadata_map);
- list_init(&expiring);
- list_init(&expired);
+ ovs_list_init(&expiring);
+ ovs_list_init(&expired);
ovs_mutex_unlock(&mutex);
ovsthread_once_done(&once);
ovsrcu_postpone(recirc_id_node_free, node);
}
- if (!list_is_empty(&expiring)) {
+ if (!ovs_list_is_empty(&expiring)) {
/* 'expired' is now empty, move nodes in 'expiring' to it. */
- list_splice(&expired, list_front(&expiring), &expiring);
+ ovs_list_splice(&expired, ovs_list_front(&expiring), &expiring);
}
}
ovs_mutex_unlock(&mutex);
cmap_remove(&metadata_map, &node->metadata_node, node->hash);
/* We keep the node in the 'id_map' so that it can be found as long
* as it lingers, and add it to the 'expiring' list. */
- list_insert(&expiring, &node->exp_node);
+ ovs_list_insert(&expiring, &node->exp_node);
ovs_mutex_unlock(&mutex);
}
}
udpif->dump_seq = seq_create();
latch_init(&udpif->exit_latch);
latch_init(&udpif->pause_latch);
- list_push_back(&all_udpifs, &udpif->list_node);
+ ovs_list_push_back(&all_udpifs, &udpif->list_node);
atomic_init(&udpif->enable_ufid, false);
atomic_init(&udpif->n_flows, 0);
atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
free(udpif->ukeys);
udpif->ukeys = NULL;
- list_remove(&udpif->list_node);
+ ovs_list_remove(&udpif->list_node);
latch_destroy(&udpif->exit_latch);
latch_destroy(&udpif->pause_latch);
seq_destroy(udpif->reval_seq);
const char *argv[] OVS_UNUSED,
void *aux OVS_UNUSED)
{
- if (list_is_singleton(&all_udpifs)) {
+ if (ovs_list_is_singleton(&all_udpifs)) {
struct udpif *udpif = NULL;
size_t len;
- udpif = OBJECT_CONTAINING(list_front(&all_udpifs), udpif, list_node);
+ udpif = OBJECT_CONTAINING(ovs_list_front(&all_udpifs), udpif, list_node);
len = (udpif->n_conns + 1) * sizeof *udpif->conns;
udpif->conn_seq = seq_read(udpif->dump_seq);
udpif->conns = xrealloc(udpif->conns, len);
static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
- list_init(&xbridge->xbundles);
+ ovs_list_init(&xbridge->xbundles);
hmap_init(&xbridge->xports);
hmap_insert(&xcfg->xbridges, &xbridge->hmap_node,
hash_pointer(xbridge->ofproto, 0));
static void
xlate_xbundle_init(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
- list_init(&xbundle->xports);
- list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
+ ovs_list_init(&xbundle->xports);
+ ovs_list_insert(&xbundle->xbridge->xbundles, &xbundle->list_node);
hmap_insert(&xcfg->xbundles, &xbundle->hmap_node,
hash_pointer(xbundle->ofbundle, 0));
}
if (xbundle) {
new_xport->xbundle = xbundle;
- list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
+ ovs_list_insert(&new_xport->xbundle->xports, &new_xport->bundle_node);
}
HMAP_FOR_EACH (pdscp, hmap_node, &xport->skb_priorities) {
}
hmap_remove(&xcfg->xbundles, &xbundle->hmap_node);
- list_remove(&xbundle->list_node);
+ ovs_list_remove(&xbundle->list_node);
bond_unref(xbundle->bond);
lacp_unref(xbundle->lacp);
free(xbundle->name);
}
if (xport->xbundle) {
- list_remove(&xport->bundle_node);
+ ovs_list_remove(&xport->bundle_node);
}
xport->xbundle = xbundle_lookup(new_xcfg, ofbundle);
if (xport->xbundle) {
- list_insert(&xport->xbundle->xports, &xport->bundle_node);
+ ovs_list_insert(&xport->xbundle->xports, &xport->bundle_node);
}
clear_skb_priorities(xport);
}
if (xport->xbundle) {
- list_remove(&xport->bundle_node);
+ ovs_list_remove(&xport->bundle_node);
}
clear_skb_priorities(xport);
bool use_recirc = false;
vid = output_vlan_to_vid(out_xbundle, vlan);
- if (list_is_empty(&out_xbundle->xports)) {
+ if (ovs_list_is_empty(&out_xbundle->xports)) {
/* Partially configured bundle with no slaves. Drop the packet. */
return;
} else if (!out_xbundle->bond) {
- xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
+ xport = CONTAINER_OF(ovs_list_front(&out_xbundle->xports), struct xport,
bundle_node);
} else {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
/* Loop through the ports already on the datapath and remove any
* that we don't need anymore. */
- list_init(&garbage_list);
+ ovs_list_init(&garbage_list);
dpif_port_dump_start(&port_dump, backer->dpif);
while (dpif_port_dump_next(&port_dump, &port)) {
node = shash_find(&init_ofp_ports, port.name);
if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
garbage = xmalloc(sizeof *garbage);
garbage->odp_port = port.port_no;
- list_push_front(&garbage_list, &garbage->list_node);
+ ovs_list_push_front(&garbage_list, &garbage->list_node);
}
}
dpif_port_dump_done(&port_dump);
bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
- list_remove(&port->bundle_node);
+ ovs_list_remove(&port->bundle_node);
port->bundle = NULL;
if (bundle->lacp) {
}
port->bundle = bundle;
- list_push_back(&bundle->ports, &port->bundle_node);
+ ovs_list_push_back(&bundle->ports, &port->bundle_node);
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
|| port->is_layer3
|| (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
bundle->aux = aux;
bundle->name = NULL;
- list_init(&bundle->ports);
+ ovs_list_init(&bundle->ports);
bundle->vlan_mode = PORT_VLAN_TRUNK;
bundle->vlan = -1;
bundle->trunks = NULL;
ok = false;
}
}
- if (!ok || list_size(&bundle->ports) != s->n_slaves) {
+ if (!ok || ovs_list_size(&bundle->ports) != s->n_slaves) {
struct ofport_dpif *next_port;
LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
found: ;
}
}
- ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
+ ovs_assert(ovs_list_size(&bundle->ports) <= s->n_slaves);
- if (list_is_empty(&bundle->ports)) {
+ if (ovs_list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
return EINVAL;
}
}
/* Bonding. */
- if (!list_is_short(&bundle->ports)) {
+ if (!ovs_list_is_short(&bundle->ports)) {
bundle->ofproto->has_bonded_bundles = true;
if (bundle->bond) {
if (bond_reconfigure(bundle->bond, s->bond)) {
if (bundle) {
bundle_del_port(port);
- if (list_is_empty(&bundle->ports)) {
+ if (ovs_list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
- } else if (list_is_short(&bundle->ports)) {
+ } else if (ovs_list_is_short(&bundle->ports)) {
bond_unref(bundle->bond);
bundle->bond = NULL;
}
} *pkt_node;
struct ovs_list packets;
- list_init(&packets);
+ ovs_list_init(&packets);
ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
if (mac_entry_get_port(ofproto->ml, e) != bundle) {
pkt_node->pkt = bond_compose_learning_packet(bundle->bond,
e->mac, e->vlan,
(void **)&pkt_node->port);
- list_push_back(&packets, &pkt_node->list_node);
+ ovs_list_push_back(&packets, &pkt_node->list_node);
}
}
ovs_rwlock_unlock(&ofproto->ml->rwlock);
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
- return CONTAINER_OF(list_front(&bundle->ports), struct ofport_dpif,
+ return CONTAINER_OF(ovs_list_front(&bundle->ports), struct ofport_dpif,
bundle_node);
}
ofproto->tables_version = CLS_MIN_VERSION;
hindex_init(&ofproto->cookies);
hmap_init(&ofproto->learned_cookies);
- list_init(&ofproto->expirable);
+ ovs_list_init(&ofproto->expirable);
ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name);
guarded_list_init(&ofproto->rule_executes);
ofproto->vlan_bitmap = NULL;
rule_execute_destroy(struct rule_execute *e)
{
ofproto_rule_unref(e->rule);
- list_remove(&e->list_node);
+ ovs_list_remove(&e->list_node);
free(e);
}
if (!c->n) {
hmap_remove(&ofproto->learned_cookies, &c->u.hmap_node);
- list_push_back(dead_cookies, &c->u.list_node);
+ ovs_list_push_back(dead_cookies, &c->u.list_node);
}
return;
rule->flags = fm->flags & OFPUTIL_FF_STATE;
*CONST_CAST(const struct rule_actions **, &rule->actions)
= rule_actions_create(fm->ofpacts, fm->ofpacts_len);
- list_init(&rule->meter_list_node);
+ ovs_list_init(&rule->meter_list_node);
rule->eviction_group = NULL;
- list_init(&rule->expirable);
+ ovs_list_init(&rule->expirable);
rule->monitor_flags = 0;
rule->add_seqno = 0;
rule->modify_seqno = 0;
}
ovs_mutex_lock(&ofproto_mutex);
- if (list_is_empty(&rule->expirable)) {
- list_insert(&rule->ofproto->expirable, &rule->expirable);
+ if (ovs_list_is_empty(&rule->expirable)) {
+ ovs_list_insert(&rule->ofproto->expirable, &rule->expirable);
}
ovs_mutex_unlock(&ofproto_mutex);
fu.ofpacts = actions ? actions->ofpacts : NULL;
fu.ofpacts_len = actions ? actions->ofpacts_len : 0;
- if (list_is_empty(msgs)) {
+ if (ovs_list_is_empty(msgs)) {
ofputil_start_flow_update(msgs);
}
ofputil_append_flow_update(&fu, msgs);
uint32_t meter_id = ofpacts_get_meter(a->ofpacts, a->ofpacts_len);
struct meter *meter = rule->ofproto->meters[meter_id];
- list_insert(&meter->rules, &rule->meter_list_node);
+ ovs_list_insert(&meter->rules, &rule->meter_list_node);
}
static void
meter = xzalloc(sizeof *meter);
meter->provider_meter_id = provider_meter_id;
meter->created = time_msec();
- list_init(&meter->rules);
+ ovs_list_init(&meter->rules);
meter_update(meter, config);
ovs_mutex_lock(&ofproto_mutex);
for (meter_id = first; meter_id <= last; ++meter_id) {
struct meter *meter = ofproto->meters[meter_id];
- if (meter && !list_is_empty(&meter->rules)) {
+ if (meter && !ovs_list_is_empty(&meter->rules)) {
struct rule *rule;
LIST_FOR_EACH (rule, meter_list_node, &meter->rules) {
stats.meter_id = meter_id;
/* Provider sets the packet and byte counts, we do the rest. */
- stats.flow_count = list_size(&meter->rules);
+ stats.flow_count = ovs_list_size(&meter->rules);
calc_duration(meter->created, time_msec(),
&stats.duration_sec, &stats.duration_nsec);
stats.n_bands = meter->n_bands;
*CONST_CAST(long long int *, &((*ofgroup)->modified)) = now;
ovs_refcount_init(&(*ofgroup)->ref_count);
- list_init(&(*ofgroup)->buckets);
+ ovs_list_init(&(*ofgroup)->buckets);
ofputil_bucket_clone_list(&(*ofgroup)->buckets, &gm->buckets, NULL);
*CONST_CAST(uint32_t *, &(*ofgroup)->n_buckets) =
- list_size(&(*ofgroup)->buckets);
+ ovs_list_size(&(*ofgroup)->buckets);
memcpy(CONST_CAST(struct ofputil_group_props *, &(*ofgroup)->props),
&gm->props, sizeof (struct ofputil_group_props));
return OFPERR_OFPGMFC_UNKNOWN_BUCKET;
}
- if (!list_is_empty(&new_ofgroup->buckets)) {
+ if (!ovs_list_is_empty(&new_ofgroup->buckets)) {
last = ofputil_bucket_list_back(&new_ofgroup->buckets);
}
}
/* Rearrange list according to command_bucket_id */
if (command_bucket_id == OFPG15_BUCKET_LAST) {
- if (!list_is_empty(&ofgroup->buckets)) {
+ if (!ovs_list_is_empty(&ofgroup->buckets)) {
struct ofputil_bucket *new_first;
const struct ofputil_bucket *first;
new_first = ofputil_bucket_find(&new_ofgroup->buckets,
first->bucket_id);
- list_splice(new_ofgroup->buckets.next, &new_first->list_node,
+ ovs_list_splice(new_ofgroup->buckets.next, &new_first->list_node,
&new_ofgroup->buckets);
}
} else if (command_bucket_id <= OFPG15_BUCKET_MAX && last) {
/* Presence of bucket is checked above so after should never be NULL */
after = ofputil_bucket_find(&new_ofgroup->buckets, command_bucket_id);
- list_splice(after->list_node.next, new_ofgroup->buckets.next,
+ ovs_list_splice(after->list_node.next, new_ofgroup->buckets.next,
last->list_node.next);
}
}
if (command_bucket_id == OFPG15_BUCKET_FIRST) {
- if (!list_is_empty(&ofgroup->buckets)) {
+ if (!ovs_list_is_empty(&ofgroup->buckets)) {
skip = ofputil_bucket_list_front(&ofgroup->buckets);
}
} else if (command_bucket_id == OFPG15_BUCKET_LAST) {
- if (!list_is_empty(&ofgroup->buckets)) {
+ if (!ovs_list_is_empty(&ofgroup->buckets)) {
skip = ofputil_bucket_list_back(&ofgroup->buckets);
}
} else {
ovs_assert(rule->removed);
if (rule->hard_timeout || rule->idle_timeout) {
- list_insert(&ofproto->expirable, &rule->expirable);
+ ovs_list_insert(&ofproto->expirable, &rule->expirable);
}
cookies_insert(ofproto, rule);
eviction_group_add_rule(rule);
cookies_remove(ofproto, rule);
eviction_group_remove_rule(rule);
- if (!list_is_empty(&rule->expirable)) {
- list_remove(&rule->expirable);
+ if (!ovs_list_is_empty(&rule->expirable)) {
+ ovs_list_remove(&rule->expirable);
}
- if (!list_is_empty(&rule->meter_list_node)) {
- list_remove(&rule->meter_list_node);
- list_init(&rule->meter_list_node);
+ if (!ovs_list_is_empty(&rule->meter_list_node)) {
+ ovs_list_remove(&rule->meter_list_node);
+ ovs_list_init(&rule->meter_list_node);
}
rule->removed = true;
static struct ofpbuf *
dequeue_packet(struct pinsched *ps, struct pinqueue *q)
{
- struct ofpbuf *packet = ofpbuf_from_list(list_pop_front(&q->packets));
+ struct ofpbuf *packet = ofpbuf_from_list(ovs_list_pop_front(&q->packets));
q->n--;
ps->n_queued--;
return packet;
q = xmalloc(sizeof *q);
hmap_insert(&ps->queues, &q->node, hash);
q->port_no = port_no;
- list_init(&q->packets);
+ ovs_list_init(&q->packets);
q->n = 0;
return q;
}
pinsched_send(struct pinsched *ps, ofp_port_t port_no,
struct ofpbuf *packet, struct ovs_list *txq)
{
- list_init(txq);
+ ovs_list_init(txq);
if (!ps) {
- list_push_back(txq, &packet->list_node);
+ ovs_list_push_back(txq, &packet->list_node);
} else if (!ps->n_queued && get_token(ps)) {
/* In the common case where we are not constrained by the rate limit,
* let the packet take the normal path. */
ps->n_normal++;
- list_push_back(txq, &packet->list_node);
+ ovs_list_push_back(txq, &packet->list_node);
} else {
/* Otherwise queue it up for the periodic callback to drain out. */
if (ps->n_queued * 1000 >= ps->token_bucket.burst) {
}
struct pinqueue *q = pinqueue_get(ps, port_no);
- list_push_back(&q->packets, &packet->list_node);
+ ovs_list_push_back(&q->packets, &packet->list_node);
q->n++;
ps->n_queued++;
ps->n_limited++;
void
pinsched_run(struct pinsched *ps, struct ovs_list *txq)
{
- list_init(txq);
+ ovs_list_init(txq);
if (ps) {
int i;
* number of iterations to allow other code to get work done too. */
for (i = 0; ps->n_queued && get_token(ps) && i < 50; i++) {
struct ofpbuf *packet = get_tx_packet(ps);
- list_push_back(txq, &packet->list_node);
+ ovs_list_push_back(txq, &packet->list_node);
}
}
}
struct ofputil_tlv_table_mod ttm;
ttm.command = NXTTMC_ADD;
- list_init(&ttm.mappings);
- list_push_back(&ttm.mappings, &tm.list_node);
+ ovs_list_init(&ttm.mappings);
+ ovs_list_push_back(&ttm.mappings, &tm.list_node);
xid = queue_msg(ofputil_encode_tlv_table_mod(OFP13_VERSION, &ttm));
xid2 = queue_msg(ofputil_encode_barrier_request(OFP13_VERSION));
{
struct expr *e = xmalloc(sizeof *e);
e->type = type;
- list_init(&e->andor);
+ ovs_list_init(&e->andor);
return e;
}
return a;
} else if (a->type == type) {
if (b->type == type) {
- list_splice(&a->andor, b->andor.next, &b->andor);
+ ovs_list_splice(&a->andor, b->andor.next, &b->andor);
free(b);
} else {
- list_push_back(&a->andor, &b->node);
+ ovs_list_push_back(&a->andor, &b->node);
}
return a;
} else if (b->type == type) {
- list_push_front(&b->andor, &a->node);
+ ovs_list_push_front(&b->andor, &a->node);
return b;
} else {
struct expr *e = expr_create_andor(type);
- list_push_back(&e->andor, &a->node);
- list_push_back(&e->andor, &b->node);
+ ovs_list_push_back(&e->andor, &a->node);
+ ovs_list_push_back(&e->andor, &b->node);
return e;
}
}
if (andor->type == EXPR_T_AND) {
/* Conjunction junction, what's your function? */
}
- list_splice(&before->node, new->andor.next, &new->andor);
+ ovs_list_splice(&before->node, new->andor.next, &new->andor);
free(new);
} else {
- list_insert(&before->node, &new->node);
+ ovs_list_insert(&before->node, &new->node);
}
}
expr_destroy(expr);
return expr_create_boolean(short_circuit);
} else {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
expr_destroy(sub);
}
}
}
- if (list_is_short(&expr->andor)) {
- if (list_is_empty(&expr->andor)) {
+ if (ovs_list_is_short(&expr->andor)) {
+ if (ovs_list_is_empty(&expr->andor)) {
free(expr);
return expr_create_boolean(!short_circuit);
} else {
- sub = expr_from_node(list_front(&expr->andor));
+ sub = expr_from_node(ovs_list_front(&expr->andor));
free(expr);
return sub;
}
LIST_FOR_EACH (sub, node, &expr->andor) {
struct expr *new_sub = expr_clone(sub);
- list_push_back(&new->andor, &new_sub->node);
+ ovs_list_push_back(&new->andor, &new_sub->node);
}
return new;
}
case EXPR_T_AND:
case EXPR_T_OR:
LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
expr_destroy(sub);
}
break;
struct annotation_nesting an;
an.symbol = symbol;
- list_push_back(nesting, &an.node);
+ ovs_list_push_back(nesting, &an.node);
struct expr *prereqs = NULL;
if (symbol->prereqs) {
}
}
- list_remove(&an.node);
+ ovs_list_remove(&an.node);
return prereqs ? expr_combine(EXPR_T_AND, expr, prereqs) : expr;
error:
expr_destroy(expr);
expr_destroy(prereqs);
- list_remove(&an.node);
+ ovs_list_remove(&an.node);
return NULL;
}
struct expr *sub, *next;
LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
struct expr *new_sub = expr_annotate__(sub, symtab,
nesting, errorp);
if (!new_sub) {
case EXPR_T_AND:
case EXPR_T_OR:
LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
expr_insert_andor(expr, next, expr_simplify(sub));
}
return expr_fix(expr);
enum expr_type b_type = a->expr->type;
return a_type < b_type ? -1 : a_type > b_type;
} else if (a->type == EXPR_T_AND || a->type == EXPR_T_OR) {
- size_t a_len = list_size(&a->expr->andor);
- size_t b_len = list_size(&b->expr->andor);
+ size_t a_len = ovs_list_size(&a->expr->andor);
+ size_t b_len = ovs_list_size(&b->expr->andor);
return a_len < b_len ? -1 : a_len > b_len;
} else {
return 0;
static struct expr *
crush_and_string(struct expr *expr, const struct expr_symbol *symbol)
{
- ovs_assert(!list_is_short(&expr->andor));
+ ovs_assert(!ovs_list_is_short(&expr->andor));
struct expr *singleton = NULL;
* EXPR_T_OR with EXPR_T_CMP subexpressions. */
struct expr *sub, *next = NULL;
LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
struct expr *new = crush_cmps(sub, symbol);
switch (new->type) {
case EXPR_T_CMP:
if (!singleton) {
- list_insert(&next->node, &new->node);
+ ovs_list_insert(&next->node, &new->node);
singleton = new;
} else {
bool match = !strcmp(new->cmp.string, singleton->cmp.string);
case EXPR_T_AND:
OVS_NOT_REACHED();
case EXPR_T_OR:
- list_insert(&next->node, &new->node);
+ ovs_list_insert(&next->node, &new->node);
break;
case EXPR_T_BOOLEAN:
if (!new->boolean) {
return expr_create_boolean(false);
}
}
- list_remove(&singleton->node);
+ ovs_list_remove(&singleton->node);
expr_destroy(expr);
return singleton;
}
sub->type = EXPR_T_CMP;
sub->cmp.symbol = symbol;
sub->cmp.string = xstrdup(string);
- list_push_back(&expr->andor, &sub->node);
+ ovs_list_push_back(&expr->andor, &sub->node);
}
sset_destroy(&result);
return expr_fix(expr);
static struct expr *
crush_and_numeric(struct expr *expr, const struct expr_symbol *symbol)
{
- ovs_assert(!list_is_short(&expr->andor));
+ ovs_assert(!ovs_list_is_short(&expr->andor));
union mf_subvalue value, mask;
memset(&value, 0, sizeof value);
struct expr *sub, *next = NULL;
LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
struct expr *new = crush_cmps(sub, symbol);
switch (new->type) {
case EXPR_T_CMP:
case EXPR_T_AND:
OVS_NOT_REACHED();
case EXPR_T_OR:
- list_insert(&next->node, &new->node);
+ ovs_list_insert(&next->node, &new->node);
break;
case EXPR_T_BOOLEAN:
if (!new->boolean) {
break;
}
}
- if (list_is_empty(&expr->andor)) {
+ if (ovs_list_is_empty(&expr->andor)) {
if (is_all_zeros(&mask, sizeof mask)) {
expr_destroy(expr);
return expr_create_boolean(true);
expr_destroy(expr);
return cmp;
}
- } else if (list_is_short(&expr->andor)) {
+ } else if (ovs_list_is_short(&expr->andor)) {
/* Transform "a && (b || c || d)" into "ab || ac || ad" where "ab" is
* computed as "a && b", etc. */
- struct expr *disjuncts = expr_from_node(list_pop_front(&expr->andor));
+ struct expr *disjuncts = expr_from_node(ovs_list_pop_front(&expr->andor));
struct expr *or;
or = xmalloc(sizeof *or);
or->type = EXPR_T_OR;
- list_init(&or->andor);
+ ovs_list_init(&or->andor);
ovs_assert(disjuncts->type == EXPR_T_OR);
LIST_FOR_EACH_SAFE (sub, next, node, &disjuncts->andor) {
ovs_assert(sub->type == EXPR_T_CMP);
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
if (mf_subvalue_intersect(&value, &mask,
&sub->cmp.value, &sub->cmp.mask,
&sub->cmp.value, &sub->cmp.mask)) {
- list_push_back(&or->andor, &sub->node);
+ ovs_list_push_back(&or->andor, &sub->node);
} else {
expr_destroy(sub);
}
}
free(disjuncts);
free(expr);
- if (list_is_empty(&or->andor)) {
+ if (ovs_list_is_empty(&or->andor)) {
free(or);
return expr_create_boolean(false);
- } else if (list_is_short(&or->andor)) {
- struct expr *cmp = expr_from_node(list_pop_front(&or->andor));
+ } else if (ovs_list_is_short(&or->andor)) {
+ struct expr *cmp = expr_from_node(ovs_list_pop_front(&or->andor));
free(or);
return cmp;
} else {
} else {
/* Transform "x && (a0 || a1) && (b0 || b1) && ..." into
* "(xa0b0 || xa0b1 || xa1b0 || xa1b1) && ...". */
- struct expr *as = expr_from_node(list_pop_front(&expr->andor));
- struct expr *bs = expr_from_node(list_pop_front(&expr->andor));
+ struct expr *as = expr_from_node(ovs_list_pop_front(&expr->andor));
+ struct expr *bs = expr_from_node(ovs_list_pop_front(&expr->andor));
struct expr *new = NULL;
struct expr *or;
or = xmalloc(sizeof *or);
or->type = EXPR_T_OR;
- list_init(&or->andor);
+ ovs_list_init(&or->andor);
struct expr *a;
LIST_FOR_EACH (a, node, &as->andor) {
if (mf_subvalue_intersect(&a_value, &a_mask,
&b->cmp.value, &b->cmp.mask,
&new->cmp.value, &new->cmp.mask)) {
- list_push_back(&or->andor, &new->node);
+ ovs_list_push_back(&or->andor, &new->node);
new = NULL;
}
}
expr_destroy(bs);
free(new);
- if (list_is_empty(&or->andor)) {
+ if (ovs_list_is_empty(&or->andor)) {
expr_destroy(expr);
free(or);
return expr_create_boolean(false);
- } else if (list_is_short(&or->andor)) {
- struct expr *cmp = expr_from_node(list_pop_front(&or->andor));
+ } else if (ovs_list_is_short(&or->andor)) {
+ struct expr *cmp = expr_from_node(ovs_list_pop_front(&or->andor));
free(or);
- if (list_is_empty(&expr->andor)) {
+ if (ovs_list_is_empty(&expr->andor)) {
expr_destroy(expr);
return crush_cmps(cmp, symbol);
} else {
return crush_cmps(expr_combine(EXPR_T_AND, cmp, expr), symbol);
}
- } else if (!list_is_empty(&expr->andor)) {
+ } else if (!ovs_list_is_empty(&expr->andor)) {
struct expr *e = expr_combine(EXPR_T_AND, or, expr);
- ovs_assert(!list_is_short(&e->andor));
+ ovs_assert(!ovs_list_is_short(&e->andor));
return crush_cmps(e, symbol);
} else {
expr_destroy(expr);
* OR-expression entirely; if so, return the result. Otherwise, 'expr'
* is now a disjunction of cmps over the same symbol. */
LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
expr_insert_andor(expr, next, crush_cmps(sub, symbol));
}
expr = expr_fix(expr);
}
/* Sort subexpressions by value and mask, to bring together duplicates. */
- size_t n = list_size(&expr->andor);
+ size_t n = ovs_list_size(&expr->andor);
struct expr **subs = xmalloc(n * sizeof *subs);
size_t i = 0;
qsort(subs, n, sizeof *subs, compare_cmps_cb);
/* Eliminate duplicates. */
- list_init(&expr->andor);
- list_push_back(&expr->andor, &subs[0]->node);
+ ovs_list_init(&expr->andor);
+ ovs_list_push_back(&expr->andor, &subs[0]->node);
for (i = 1; i < n; i++) {
- struct expr *a = expr_from_node(list_back(&expr->andor));
+ struct expr *a = expr_from_node(ovs_list_back(&expr->andor));
struct expr *b = subs[i];
if (compare_cmps_3way(a, b)) {
- list_push_back(&expr->andor, &b->node);
+ ovs_list_push_back(&expr->andor, &b->node);
} else {
expr_destroy(b);
}
static struct expr *
expr_sort(struct expr *expr)
{
- size_t n = list_size(&expr->andor);
+ size_t n = ovs_list_size(&expr->andor);
struct expr_sort *subs = xmalloc(n * sizeof *subs);
struct expr *sub;
size_t i;
qsort(subs, n, sizeof *subs, compare_expr_sort);
- list_init(&expr->andor);
+ ovs_list_init(&expr->andor);
for (int i = 0; i < n; ) {
if (subs[i].relop) {
int j;
combined = expr_combine(EXPR_T_AND, combined,
subs[k].expr);
}
- ovs_assert(!list_is_short(&combined->andor));
+ ovs_assert(!ovs_list_is_short(&combined->andor));
crushed = crush_cmps(combined, subs[i].relop);
}
if (crushed->type == EXPR_T_BOOLEAN) {
&b->cmp.value, &b->cmp.mask,
&b->cmp.value, &b->cmp.mask)
: !strcmp(a->cmp.string, b->cmp.string)) {
- list_remove(&a->node);
+ ovs_list_remove(&a->node);
expr_destroy(a);
} else {
expr_destroy(expr);
return expr_create_boolean(false);
}
}
- if (list_is_short(&expr->andor)) {
- struct expr *sub = expr_from_node(list_front(&expr->andor));
+ if (ovs_list_is_short(&expr->andor)) {
+ struct expr *sub = expr_from_node(ovs_list_front(&expr->andor));
free(expr);
return sub;
}
LIST_FOR_EACH (p, node, &term->andor) {
struct expr *new = expr_clone(p);
- list_push_back(&and->andor, &new->node);
+ ovs_list_push_back(&and->andor, &new->node);
}
} else {
struct expr *new = expr_clone(term);
- list_push_back(&and->andor, &new->node);
+ ovs_list_push_back(&and->andor, &new->node);
}
}
- list_push_back(&or->andor, &and->node);
+ ovs_list_push_back(&or->andor, &and->node);
}
expr_destroy(expr);
return expr_normalize_or(or);
LIST_FOR_EACH_SAFE (sub, next, node, &expr->andor) {
if (sub->type == EXPR_T_AND) {
- list_remove(&sub->node);
+ ovs_list_remove(&sub->node);
struct expr *new = expr_normalize_and(sub);
if (new->type == EXPR_T_BOOLEAN) {
ovs_assert(sub->type == EXPR_T_CMP);
}
}
- if (list_is_empty(&expr->andor)) {
+ if (ovs_list_is_empty(&expr->andor)) {
free(expr);
return expr_create_boolean(false);
}
- if (list_is_short(&expr->andor)) {
- struct expr *sub = expr_from_node(list_pop_front(&expr->andor));
+ if (ovs_list_is_short(&expr->andor)) {
+ struct expr *sub = expr_from_node(ovs_list_pop_front(&expr->andor));
free(expr);
return sub;
}
case EXPR_T_AND:
case EXPR_T_OR:
- if (list_is_short(&expr->andor)) {
+ if (ovs_list_is_short(&expr->andor)) {
return false;
}
LIST_FOR_EACH (sub, node, &expr->andor) {
struct ovs_list *both)
{
hmap_init(datapaths);
- list_init(sb_only);
- list_init(nb_only);
- list_init(both);
+ ovs_list_init(sb_only);
+ ovs_list_init(nb_only);
+ ovs_list_init(both);
const struct sbrec_datapath_binding *sb, *sb_next;
SBREC_DATAPATH_BINDING_FOR_EACH_SAFE (sb, sb_next, ctx->ovnsb_idl) {
struct ovn_datapath *od = ovn_datapath_create(datapaths, &key,
NULL, NULL, sb);
- list_push_back(sb_only, &od->list);
+ ovs_list_push_back(sb_only, &od->list);
}
const struct nbrec_logical_switch *nbs;
&nbs->header_.uuid);
if (od) {
od->nbs = nbs;
- list_remove(&od->list);
- list_push_back(both, &od->list);
+ ovs_list_remove(&od->list);
+ ovs_list_push_back(both, &od->list);
} else {
od = ovn_datapath_create(datapaths, &nbs->header_.uuid,
nbs, NULL, NULL);
- list_push_back(nb_only, &od->list);
+ ovs_list_push_back(nb_only, &od->list);
}
}
if (od) {
if (!od->nbs) {
od->nbr = nbr;
- list_remove(&od->list);
- list_push_back(both, &od->list);
+ ovs_list_remove(&od->list);
+ ovs_list_push_back(both, &od->list);
} else {
/* Can't happen! */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
} else {
od = ovn_datapath_create(datapaths, &nbr->header_.uuid,
NULL, nbr, NULL);
- list_push_back(nb_only, &od->list);
+ ovs_list_push_back(nb_only, &od->list);
}
od->gateway = 0;
join_datapaths(ctx, datapaths, &sb_only, &nb_only, &both);
- if (!list_is_empty(&nb_only)) {
+ if (!ovs_list_is_empty(&nb_only)) {
/* First index the in-use datapath tunnel IDs. */
struct hmap dp_tnlids = HMAP_INITIALIZER(&dp_tnlids);
struct ovn_datapath *od;
/* Delete southbound records without northbound matches. */
struct ovn_datapath *od, *next;
LIST_FOR_EACH_SAFE (od, next, list, &sb_only) {
- list_remove(&od->list);
+ ovs_list_remove(&od->list);
sbrec_datapath_binding_delete(od->sb);
ovn_datapath_destroy(datapaths, od);
}
struct ovs_list *both)
{
hmap_init(ports);
- list_init(sb_only);
- list_init(nb_only);
- list_init(both);
+ ovs_list_init(sb_only);
+ ovs_list_init(nb_only);
+ ovs_list_init(both);
const struct sbrec_port_binding *sb;
SBREC_PORT_BINDING_FOR_EACH (sb, ctx->ovnsb_idl) {
struct ovn_port *op = ovn_port_create(ports, sb->logical_port,
NULL, NULL, sb);
- list_push_back(sb_only, &op->list);
+ ovs_list_push_back(sb_only, &op->list);
}
struct ovn_datapath *od;
continue;
}
op->nbs = nbs;
- list_remove(&op->list);
- list_push_back(both, &op->list);
+ ovs_list_remove(&op->list);
+ ovs_list_push_back(both, &op->list);
} else {
op = ovn_port_create(ports, nbs->name, nbs, NULL, NULL);
- list_push_back(nb_only, &op->list);
+ ovs_list_push_back(nb_only, &op->list);
}
op->od = od;
continue;
}
op->nbr = nbr;
- list_remove(&op->list);
- list_push_back(both, &op->list);
+ ovs_list_remove(&op->list);
+ ovs_list_push_back(both, &op->list);
} else {
op = ovn_port_create(ports, nbr->name, NULL, nbr, NULL);
- list_push_back(nb_only, &op->list);
+ ovs_list_push_back(nb_only, &op->list);
}
op->ip = ip;
/* Delete southbound records without northbound matches. */
LIST_FOR_EACH_SAFE(op, next, list, &sb_only) {
- list_remove(&op->list);
+ ovs_list_remove(&op->list);
sbrec_port_binding_delete(op->sb);
ovn_port_destroy(ports, op);
}
remote = xmalloc(sizeof *remote);
remote->server = svr;
remote->listener = listener;
- list_init(&remote->sessions);
+ ovs_list_init(&remote->sessions);
remote->dscp = options->dscp;
shash_add(&svr->remotes, name, remote);
if (remote->listener) {
status->bound_port = pstream_get_bound_port(remote->listener);
- status->is_connected = !list_is_empty(&remote->sessions);
- status->n_connections = list_size(&remote->sessions);
+ status->is_connected = !ovs_list_is_empty(&remote->sessions);
+ status->n_connections = ovs_list_size(&remote->sessions);
return true;
}
s = xzalloc(sizeof *s);
ovsdb_session_init(&s->up, &remote->server->up);
s->remote = remote;
- list_push_back(&remote->sessions, &s->node);
+ ovs_list_push_back(&remote->sessions, &s->node);
hmap_init(&s->triggers);
hmap_init(&s->monitors);
s->js = js;
hmap_destroy(&s->triggers);
jsonrpc_session_close(s->js);
- list_remove(&s->node);
+ ovs_list_remove(&s->node);
s->remote->server->n_sessions--;
ovsdb_session_destroy(&s->up);
free(s);
const struct ovs_list *sessions = &remote->sessions;
const struct ovsdb_jsonrpc_session *s;
- if (list_is_empty(sessions)) {
+ if (ovs_list_is_empty(sessions)) {
return false;
}
- ovs_assert(list_is_singleton(sessions));
- s = CONTAINER_OF(list_front(sessions), struct ovsdb_jsonrpc_session, node);
+ ovs_assert(ovs_list_is_singleton(sessions));
+ s = CONTAINER_OF(ovs_list_front(sessions), struct ovsdb_jsonrpc_session, node);
ovsdb_jsonrpc_session_get_status(s, status);
status->n_connections = 1;
static void
ovsdb_jsonrpc_trigger_complete_done(struct ovsdb_jsonrpc_session *s)
{
- while (!list_is_empty(&s->up.completions)) {
+ while (!ovs_list_is_empty(&s->up.completions)) {
struct ovsdb_jsonrpc_trigger *t
= CONTAINER_OF(s->up.completions.next,
struct ovsdb_jsonrpc_trigger, trigger.node);
jm = xzalloc(sizeof *jm);
jm->jsonrpc_monitor = jsonrpc_monitor;
- list_push_back(&dbmon->jsonrpc_monitors, &jm->node);
+ ovs_list_push_back(&dbmon->jsonrpc_monitors, &jm->node);
}
struct ovsdb_monitor *
ovsdb_replica_init(&dbmon->replica, &ovsdb_jsonrpc_replica_class);
ovsdb_add_replica(db, &dbmon->replica);
- list_init(&dbmon->jsonrpc_monitors);
+ ovs_list_init(&dbmon->jsonrpc_monitors);
dbmon->db = db;
dbmon->n_transactions = 0;
shash_init(&dbmon->tables);
{
struct jsonrpc_monitor_node *jm;
- if (list_is_empty(&dbmon->jsonrpc_monitors)) {
+ if (ovs_list_is_empty(&dbmon->jsonrpc_monitors)) {
ovsdb_monitor_destroy(dbmon);
return;
}
struct ovsdb_monitor_table *mt = node->data;
ovsdb_monitor_table_untrack_changes(mt, unflushed);
}
- list_remove(&jm->node);
+ ovs_list_remove(&jm->node);
free(jm);
/* Destroy ovsdb monitor if this is the last user. */
- if (list_is_empty(&dbmon->jsonrpc_monitors)) {
+ if (ovs_list_is_empty(&dbmon->jsonrpc_monitors)) {
ovsdb_monitor_destroy(dbmon);
}
/* New_dbmon should be associated with only one jsonrpc
* connections. */
- ovs_assert(list_is_singleton(&new_dbmon->jsonrpc_monitors));
+ ovs_assert(ovs_list_is_singleton(&new_dbmon->jsonrpc_monitors));
hash = ovsdb_monitor_hash(new_dbmon, 0);
HMAP_FOR_EACH_WITH_HASH(dbmon, hmap_node, hash, &ovsdb_monitors) {
{
struct shash_node *node;
- list_remove(&dbmon->replica.node);
+ ovs_list_remove(&dbmon->replica.node);
if (!hmap_node_is_null(&dbmon->hmap_node)) {
hmap_remove(&ovsdb_monitors, &dbmon->hmap_node);
db = xmalloc(sizeof *db);
db->schema = schema;
- list_init(&db->replicas);
- list_init(&db->triggers);
+ ovs_list_init(&db->replicas);
+ ovs_list_init(&db->triggers);
db->run_triggers = false;
shash_init(&db->tables);
struct shash_node *node;
/* Remove all the replicas. */
- while (!list_is_empty(&db->replicas)) {
+ while (!ovs_list_is_empty(&db->replicas)) {
struct ovsdb_replica *r
- = CONTAINER_OF(list_pop_back(&db->replicas),
+ = CONTAINER_OF(ovs_list_pop_back(&db->replicas),
struct ovsdb_replica, node);
ovsdb_remove_replica(db, r);
}
void
ovsdb_add_replica(struct ovsdb *db, struct ovsdb_replica *r)
{
- list_push_back(&db->replicas, &r->node);
+ ovs_list_push_back(&db->replicas, &r->node);
}
void
ovsdb_remove_replica(struct ovsdb *db OVS_UNUSED, struct ovsdb_replica *r)
{
- list_remove(&r->node);
+ ovs_list_remove(&r->node);
(r->class->destroy)(r);
}
struct ovsdb_row *row = xmalloc(row_size);
row->table = CONST_CAST(struct ovsdb_table *, table);
row->txn_row = NULL;
- list_init(&row->src_refs);
- list_init(&row->dst_refs);
+ ovs_list_init(&row->src_refs);
+ ovs_list_init(&row->dst_refs);
row->n_refs = 0;
return row;
}
const struct shash_node *node;
LIST_FOR_EACH_SAFE (weak, next, dst_node, &row->dst_refs) {
- list_remove(&weak->src_node);
- list_remove(&weak->dst_node);
+ ovs_list_remove(&weak->src_node);
+ ovs_list_remove(&weak->dst_node);
free(weak);
}
LIST_FOR_EACH_SAFE (weak, next, src_node, &row->src_refs) {
- list_remove(&weak->src_node);
- list_remove(&weak->dst_node);
+ ovs_list_remove(&weak->src_node);
+ ovs_list_remove(&weak->dst_node);
free(weak);
}
ovsdb_session_init(struct ovsdb_session *session, struct ovsdb_server *server)
{
session->server = server;
- list_init(&session->completions);
+ ovs_list_init(&session->completions);
hmap_init(&session->waiters);
}
struct ovsdb_lock_waiter *
ovsdb_lock_get_owner(const struct ovsdb_lock *lock)
{
- return CONTAINER_OF(list_front(&lock->waiters),
+ return CONTAINER_OF(ovs_list_front(&lock->waiters),
struct ovsdb_lock_waiter, lock_node);
}
{
struct ovsdb_lock *lock = waiter->lock;
- list_remove(&waiter->lock_node);
+ ovs_list_remove(&waiter->lock_node);
waiter->lock = NULL;
- if (list_is_empty(&lock->waiters)) {
+ if (ovs_list_is_empty(&lock->waiters)) {
hmap_remove(&lock->server->locks, &lock->hmap_node);
free(lock->name);
free(lock);
lock->server = server;
lock->name = xstrdup(lock_name);
hmap_insert(&server->locks, &lock->hmap_node, hash);
- list_init(&lock->waiters);
+ ovs_list_init(&lock->waiters);
return lock;
}
struct ovsdb_lock *lock;
lock = ovsdb_server_create_lock__(server, lock_name, hash);
- victim = (mode == OVSDB_LOCK_STEAL && !list_is_empty(&lock->waiters)
+ victim = (mode == OVSDB_LOCK_STEAL && !ovs_list_is_empty(&lock->waiters)
? ovsdb_lock_get_owner(lock)
: NULL);
waiter->lock_name = xstrdup(lock_name);
waiter->lock = lock;
if (mode == OVSDB_LOCK_STEAL) {
- list_push_front(&lock->waiters, &waiter->lock_node);
+ ovs_list_push_front(&lock->waiters, &waiter->lock_node);
} else {
- list_push_back(&lock->waiters, &waiter->lock_node);
+ ovs_list_push_back(&lock->waiters, &waiter->lock_node);
}
waiter->session = session;
hmap_insert(&waiter->session->waiters, &waiter->session_node, hash);
{
struct ovsdb_txn *txn = xmalloc(sizeof *txn);
txn->db = db;
- list_init(&txn->txn_tables);
+ ovs_list_init(&txn->txn_tables);
ds_init(&txn->comment);
return txn;
}
static void
ovsdb_txn_free(struct ovsdb_txn *txn)
{
- ovs_assert(list_is_empty(&txn->txn_tables));
+ ovs_assert(ovs_list_is_empty(&txn->txn_tables));
ds_destroy(&txn->comment);
free(txn);
}
dst = ovsdb_txn_row_modify(txn, dst);
- if (!list_is_empty(&dst->dst_refs)) {
+ if (!ovs_list_is_empty(&dst->dst_refs)) {
/* Omit duplicates. */
- weak = CONTAINER_OF(list_back(&dst->dst_refs),
+ weak = CONTAINER_OF(ovs_list_back(&dst->dst_refs),
struct ovsdb_weak_ref, dst_node);
if (weak->src == src) {
return;
weak = xmalloc(sizeof *weak);
weak->src = src;
- list_push_back(&dst->dst_refs, &weak->dst_node);
- list_push_back(&src->src_refs, &weak->src_node);
+ ovs_list_push_back(&dst->dst_refs, &weak->dst_node);
+ ovs_list_push_back(&src->src_refs, &weak->src_node);
}
static struct ovsdb_error * OVS_WARN_UNUSED_RESULT
if (error) {
return OVSDB_WRAP_BUG("can't happen", error);
}
- if (list_is_empty(&txn->txn_tables)) {
+ if (ovs_list_is_empty(&txn->txn_tables)) {
ovsdb_txn_abort(txn);
return NULL;
}
for (i = 0; i < table->schema->n_indexes; i++) {
hmap_init(&txn_table->txn_indexes[i]);
}
- list_push_back(&txn->txn_tables, &txn_table->node);
+ ovs_list_push_back(&txn->txn_tables, &txn_table->node);
}
return table->txn_table;
}
txn_table->table->txn_table = NULL;
hmap_destroy(&txn_table->txn_rows);
- list_remove(&txn_table->node);
+ ovs_list_remove(&txn_table->node);
free(txn_table);
}
{
trigger->session = session;
trigger->db = db;
- list_push_back(&trigger->db->triggers, &trigger->node);
+ ovs_list_push_back(&trigger->db->triggers, &trigger->node);
trigger->request = request;
trigger->result = NULL;
trigger->created = now;
void
ovsdb_trigger_destroy(struct ovsdb_trigger *trigger)
{
- list_remove(&trigger->node);
+ ovs_list_remove(&trigger->node);
json_destroy(trigger->request);
json_destroy(trigger->result);
}
ovsdb_trigger_complete(struct ovsdb_trigger *t)
{
ovs_assert(t->result != NULL);
- list_remove(&t->node);
- list_push_back(&t->session->completions, &t->node);
+ ovs_list_remove(&t->node);
+ ovs_list_push_back(&t->session->completions, &t->node);
}
sport->p_element.system_id.rsvd2[1]);
/* Should receive 2 mappings */
- assert(!list_is_empty(&rport->p_isid_vlan_maps));
+ assert(!ovs_list_is_empty(&rport->p_isid_vlan_maps));
/* For each received isid/vlan mapping */
LIST_FOR_EACH (received_map, m_entries, &rport->p_isid_vlan_maps) {
lldp = lldp_create_dummy();
if ((lldp == NULL) ||
(lldp->lldpd == NULL) ||
- list_is_empty(&lldp->lldpd->g_hardware)) {
+ ovs_list_is_empty(&lldp->lldpd->g_hardware)) {
printf("Error: unable to create dummy lldp instance");
return 1;
}
map[1].isid_vlan_data.vlan = map_init[1].isid_vlan_data.vlan;
map[1].isid_vlan_data.isid = map_init[1].isid_vlan_data.isid;
- list_init(&hw->h_lport.p_isid_vlan_maps);
- list_push_back(&hw->h_lport.p_isid_vlan_maps, &map[0].m_entries);
- list_push_back(&hw->h_lport.p_isid_vlan_maps, &map[1].m_entries);
+ ovs_list_init(&hw->h_lport.p_isid_vlan_maps);
+ ovs_list_push_back(&hw->h_lport.p_isid_vlan_maps, &map[0].m_entries);
+ ovs_list_push_back(&hw->h_lport.p_isid_vlan_maps, &map[1].m_entries);
/* Construct LLDPPDU (including Ethernet header) */
eth_compose(&packet, eth_addr_lldp, eth_src, ETH_TYPE_LLDP, 0);
&tcls);
check_tables(&cls, -1, -1, -1, n_invisible_rules, version);
- list_push_back(&list, &rule->list_node);
+ ovs_list_push_back(&list, &rule->list_node);
} else if (classifier_remove(&cls, &rule->cls_rule)) {
ovsrcu_postpone(free_rule, rule);
}
{
size_t i;
- list_init(list);
+ ovs_list_init(list);
for (i = 0; i < n; i++) {
elements[i].value = i;
- list_push_back(list, &elements[i].node);
+ ovs_list_push_back(list, &elements[i].node);
values[i] = i;
}
}
assert(&e->node == list);
assert(i == n);
- assert(list_is_empty(list) == !n);
- assert(list_is_singleton(list) == (n == 1));
- assert(list_is_short(list) == (n < 2));
- assert(list_size(list) == n);
+ assert(ovs_list_is_empty(list) == !n);
+ assert(ovs_list_is_singleton(list) == (n == 1));
+ assert(ovs_list_is_short(list) == (n < 2));
+ assert(ovs_list_size(list) == n);
}
#if 0
LIST_FOR_EACH_SAFE (e, next, node, &list) {
assert(i < n);
if (pattern & (1ul << i)) {
- list_remove(&e->node);
+ ovs_list_remove(&e->node);
n_remaining--;
memmove(&values[values_idx], &values[values_idx + 1],
sizeof *values * (n_remaining - values_idx));
struct expr *e = expr_create_andor(type);
for (int i = 0; i < 2; i++) {
struct expr *sub = make_terminal(terminalp);
- list_push_back(&e->andor, &sub->node);
+ ovs_list_push_back(&e->andor, &sub->node);
}
return e;
} else if (n == 1) {
struct expr *sub = (ts->s[i] > 2
? build_tree_shape(t, tsp, terminalp)
: build_simple_tree(t, ts->s[i], terminalp));
- list_push_back(&e->andor, &sub->node);
+ ovs_list_push_back(&e->andor, &sub->node);
}
return e;
}
}
ovsdb_trigger_run(db, now);
- while (!list_is_empty(&session.completions)) {
- do_trigger_dump(CONTAINER_OF(list_pop_front(&session.completions),
+ while (!ovs_list_is_empty(&session.completions)) {
+ do_trigger_dump(CONTAINER_OF(ovs_list_pop_front(&session.completions),
struct test_trigger, trigger.node),
now, "delayed");
}
{
struct ovs_list requests;
- list_init(&requests);
- list_push_back(&requests, &request->list_node);
+ ovs_list_init(&requests);
+ ovs_list_push_back(&requests, &request->list_node);
transact_multiple_noreply(vconn, &requests);
}
struct ovs_list requests;
size_t i;
- list_init(&requests);
+ ovs_list_init(&requests);
/* Bundles need OpenFlow 1.3+. */
usable_protocols &= OFPUTIL_P_OF13_UP;
struct ofputil_flow_mod *fm = &fms[i];
struct ofpbuf *request = ofputil_encode_flow_mod(fm, protocol);
- list_push_back(&requests, &request->list_node);
+ ovs_list_push_back(&requests, &request->list_node);
free(CONST_CAST(struct ofpact *, fm->ofpacts));
}
}
ofm = ofputil_encode_flow_mod(&fm, protocol);
- list_push_back(packets, &ofm->list_node);
+ ovs_list_push_back(packets, &ofm->list_node);
}
static void
read_flows_from_switch(vconn, protocol, &tables, SWITCH_IDX);
- list_init(&requests);
+ ovs_list_init(&requests);
FOR_EACH_TABLE (cls, &tables) {
/* Delete flows that exist on the switch but not in the file. */
struct vsctl_bridge *br = xmalloc(sizeof *br);
br->br_cfg = br_cfg;
br->name = xstrdup(name);
- list_init(&br->ports);
+ ovs_list_init(&br->ports);
br->parent = parent;
br->vlan = vlan;
hmap_init(&br->children);
static void
del_cached_bridge(struct vsctl_context *vsctl_ctx, struct vsctl_bridge *br)
{
- ovs_assert(list_is_empty(&br->ports));
+ ovs_assert(ovs_list_is_empty(&br->ports));
ovs_assert(hmap_is_empty(&br->children));
if (br->parent) {
hmap_remove(&br->parent->children, &br->children_node);
}
port = xmalloc(sizeof *port);
- list_push_back(&parent->ports, &port->ports_node);
- list_init(&port->ifaces);
+ ovs_list_push_back(&parent->ports, &port->ports_node);
+ ovs_list_init(&port->ifaces);
port->port_cfg = port_cfg;
port->bridge = parent;
shash_add(&vsctl_ctx->ports, port_cfg->name, port);
static void
del_cached_port(struct vsctl_context *vsctl_ctx, struct vsctl_port *port)
{
- ovs_assert(list_is_empty(&port->ifaces));
- list_remove(&port->ports_node);
+ ovs_assert(ovs_list_is_empty(&port->ifaces));
+ ovs_list_remove(&port->ports_node);
shash_find_and_delete(&vsctl_ctx->ports, port->port_cfg->name);
ovsrec_port_delete(port->port_cfg);
free(port);
struct vsctl_iface *iface;
iface = xmalloc(sizeof *iface);
- list_push_back(&parent->ifaces, &iface->ifaces_node);
+ ovs_list_push_back(&parent->ifaces, &iface->ifaces_node);
iface->iface_cfg = iface_cfg;
iface->port = parent;
shash_add(&vsctl_ctx->ifaces, iface_cfg->name, iface);
static void
del_cached_iface(struct vsctl_context *vsctl_ctx, struct vsctl_iface *iface)
{
- list_remove(&iface->ifaces_node);
+ ovs_list_remove(&iface->ifaces_node);
shash_find_and_delete(&vsctl_ctx->ifaces, iface->iface_cfg->name);
ovsrec_interface_delete(iface->iface_cfg);
free(iface);
}
}
- if (list_is_empty(&port->ifaces)) {
+ if (ovs_list_is_empty(&port->ifaces)) {
port_destroy(port);
}
}
/* Get slaves. */
s.n_slaves = 0;
- s.slaves = xmalloc(list_size(&port->ifaces) * sizeof *s.slaves);
+ s.slaves = xmalloc(ovs_list_size(&port->ifaces) * sizeof *s.slaves);
LIST_FOR_EACH (iface, port_elem, &port->ifaces) {
s.slaves[s.n_slaves++] = iface->ofp_port;
}
}
/* STP over bonds is not supported. */
- if (!list_is_singleton(&port->ifaces)) {
+ if (!ovs_list_is_singleton(&port->ifaces)) {
VLOG_ERR("port %s: cannot enable STP on bonds, disabling",
port->name);
port_s->enable = false;
return;
}
- iface = CONTAINER_OF(list_front(&port->ifaces), struct iface, port_elem);
+ iface = CONTAINER_OF(ovs_list_front(&port->ifaces), struct iface, port_elem);
/* Internal ports shouldn't participate in spanning tree, so
* skip them. */
}
/* RSTP over bonds is not supported. */
- if (!list_is_singleton(&port->ifaces)) {
+ if (!ovs_list_is_singleton(&port->ifaces)) {
VLOG_ERR("port %s: cannot enable RSTP on bonds, disabling",
port->name);
port_s->enable = false;
return;
}
- iface = CONTAINER_OF(list_front(&port->ifaces), struct iface, port_elem);
+ iface = CONTAINER_OF(ovs_list_front(&port->ifaces), struct iface, port_elem);
/* Internal ports shouldn't participate in spanning tree, so
* skip them. */
static bool
port_is_bond_fake_iface(const struct port *port)
{
- return port->cfg->bond_fake_iface && !list_is_short(&port->ifaces);
+ return port->cfg->bond_fake_iface && !ovs_list_is_short(&port->ifaces);
}
static void
/* Create the iface structure. */
iface = xzalloc(sizeof *iface);
- list_push_back(&port->ifaces, &iface->port_elem);
+ ovs_list_push_back(&port->ifaces, &iface->port_elem);
hmap_insert(&br->iface_by_name, &iface->name_node,
hash_string(iface_cfg->name, 0));
iface->port = port;
}
/* STP doesn't currently support bonds. */
- if (!list_is_singleton(&port->ifaces)) {
+ if (!ovs_list_is_singleton(&port->ifaces)) {
ovsrec_port_set_status(port->cfg, NULL);
return;
}
- iface = CONTAINER_OF(list_front(&port->ifaces), struct iface, port_elem);
+ iface = CONTAINER_OF(ovs_list_front(&port->ifaces), struct iface, port_elem);
if (ofproto_port_get_stp_status(ofproto, iface->ofp_port, &status)) {
return;
}
}
/* STP doesn't currently support bonds. */
- if (!list_is_singleton(&port->ifaces)) {
+ if (!ovs_list_is_singleton(&port->ifaces)) {
return;
}
- iface = CONTAINER_OF(list_front(&port->ifaces), struct iface, port_elem);
+ iface = CONTAINER_OF(ovs_list_front(&port->ifaces), struct iface, port_elem);
if (ofproto_port_get_stp_stats(ofproto, iface->ofp_port, &stats)) {
return;
}
}
/* RSTP doesn't currently support bonds. */
- if (!list_is_singleton(&port->ifaces)) {
+ if (!ovs_list_is_singleton(&port->ifaces)) {
ovsrec_port_set_rstp_status(port->cfg, NULL);
return;
}
- iface = CONTAINER_OF(list_front(&port->ifaces), struct iface, port_elem);
+ iface = CONTAINER_OF(ovs_list_front(&port->ifaces), struct iface, port_elem);
if (ofproto_port_get_rstp_status(ofproto, iface->ofp_port, &status)) {
return;
}
struct eth_addr mac;
/* Return if port is not a bond */
- if (list_is_singleton(&port->ifaces)) {
+ if (ovs_list_is_singleton(&port->ifaces)) {
return;
}
struct ovs_list *list = xmalloc(sizeof *list);
struct bridge_aa_vlan *node, *next;
- list_init(list);
+ ovs_list_init(list);
ofproto_aa_vlan_get_queued(br->ofproto, list);
LIST_FOR_EACH_SAFE (node, next, list_node, list) {
bridge_aa_update_trunks(port, node);
}
- list_remove(&node->list_node);
+ ovs_list_remove(&node->list_node);
free(node->port_name);
free(node);
}
port->bridge = br;
port->name = xstrdup(cfg->name);
port->cfg = cfg;
- list_init(&port->ifaces);
+ ovs_list_init(&port->ifaces);
hmap_insert(&br->ports, &port->hmap_node, hash_string(port->name, 0));
return port;
0);
s->priority = (priority > 0 && priority <= UINT16_MAX
? priority
- : UINT16_MAX - !list_is_short(&port->ifaces));
+ : UINT16_MAX - !ovs_list_is_short(&port->ifaces));
lacp_time = smap_get(&port->cfg->other_config, "lacp-time");
s->fast = lacp_time && !strcasecmp(lacp_time, "fast");
hmap_remove(&br->ifaces, &iface->ofp_port_node);
}
- list_remove(&iface->port_elem);
+ ovs_list_remove(&iface->port_elem);
hmap_remove(&br->iface_by_name, &iface->name_node);
/* The user is changing configuration here, so netdev_remove needs to be
struct port *port = iface->port;
iface_destroy__(iface);
- if (list_is_empty(&port->ifaces)) {
+ if (ovs_list_is_empty(&port->ifaces)) {
port_destroy(port);
}
}
ofproto_bundle_unregister(port->bridge->ofproto, port);
- vlandev = CONTAINER_OF(list_front(&port->ifaces), struct iface,
+ vlandev = CONTAINER_OF(ovs_list_front(&port->ifaces), struct iface,
port_elem);
realdev_name = smap_get(&port->cfg->other_config, "realdev");
struct vtep_ctl_port *port;
port = xmalloc(sizeof *port);
- list_push_back(&ps->ports, &port->ports_node);
+ ovs_list_push_back(&ps->ports, &port->ports_node);
port->port_cfg = port_cfg;
port->ps = ps;
shash_add(&vtepctl_ctx->ports, cache_name, port);
{
char *cache_name = xasprintf("%s+%s", port->ps->name, port->port_cfg->name);
- list_remove(&port->ports_node);
+ ovs_list_remove(&port->ports_node);
shash_find_and_delete(&vtepctl_ctx->ports, cache_name);
vteprec_physical_port_delete(port->port_cfg);
free(cache_name);
struct vtep_ctl_pswitch *ps = xmalloc(sizeof *ps);
ps->ps_cfg = ps_cfg;
ps->name = xstrdup(ps_cfg->name);
- list_init(&ps->ports);
+ ovs_list_init(&ps->ports);
shash_add(&vtepctl_ctx->pswitches, ps->name, ps);
}
static void
del_cached_pswitch(struct vtep_ctl_context *ctx, struct vtep_ctl_pswitch *ps)
{
- ovs_assert(list_is_empty(&ps->ports));
+ ovs_assert(ovs_list_is_empty(&ps->ports));
if (ps->ps_cfg) {
vteprec_physical_switch_delete(ps->ps_cfg);
vtep_delete_pswitch(ctx->vtep_global, ps->ps_cfg);
ploc = xmalloc(sizeof *ploc);
ploc->ploc_cfg = ploc_cfg;
- list_push_back(&mcast_mac->locators, &ploc->locators_node);
+ ovs_list_push_back(&mcast_mac->locators, &ploc->locators_node);
}
static void
LIST_FOR_EACH (ploc, locators_node, &mcast_mac->locators) {
if (ploc->ploc_cfg == ploc_cfg) {
- list_remove(&ploc->locators_node);
+ ovs_list_remove(&ploc->locators_node);
free(ploc);
return;
}
mcast_shash = local ? &ls->mcast_local : &ls->mcast_remote;
mcast_mac->ploc_set_cfg = ploc_set_cfg;
- list_init(&mcast_mac->locators);
+ ovs_list_init(&mcast_mac->locators);
shash_add(mcast_shash, mac, mcast_mac);
for (i = 0; i < ploc_set_cfg->n_locators; i++) {
size_t n_locators;
int i;
- n_locators = list_size(&mcast_mac->locators);
+ n_locators = ovs_list_size(&mcast_mac->locators);
ovs_assert(n_locators);
locators = xmalloc(n_locators * sizeof *locators);
mcast_mac->ploc_set_cfg = ploc_set_cfg;
del_ploc_from_mcast_mac(mcast_mac, ploc_cfg);
- if (list_is_empty(&mcast_mac->locators)) {
+ if (ovs_list_is_empty(&mcast_mac->locators)) {
struct shash_node *node = shash_find(mcast_shash, mac);
vteprec_physical_locator_set_delete(ploc_set_cfg);