void
bfd_unref(struct bfd *bfd) OVS_EXCLUDED(mutex)
{
- if (bfd && ovs_refcount_unref(&bfd->ref_cnt) == 1) {
+ if (bfd && ovs_refcount_unref_relaxed(&bfd->ref_cnt) == 1) {
ovs_mutex_lock(&mutex);
bfd_status_changed(bfd);
hmap_remove(all_bfds, &bfd->node);
return;
}
- if (ovs_refcount_unref(&cfm->ref_cnt) != 1) {
+ if (ovs_refcount_unref_relaxed(&cfm->ref_cnt) != 1) {
return;
}
/* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
* get a new reference to 'dp' through the 'dp_netdevs' shash. */
ovs_mutex_lock(&dp_netdev_mutex);
- if (ovs_refcount_unref(&dp->ref_cnt) == 1) {
+ if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
dp_netdev_free(dp);
}
ovs_mutex_unlock(&dp_netdev_mutex);
struct dp_netdev *dp = get_dp_netdev(dpif);
if (!atomic_flag_test_and_set(&dp->destroyed)) {
- if (ovs_refcount_unref(&dp->ref_cnt) == 1) {
+ if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
/* Can't happen: 'dpif' still owns a reference to 'dp'. */
OVS_NOT_REACHED();
}
/* Releases one reference to 'port' (NULL-safe).  When the count drops to
 * zero (the unref call returns 1, i.e. this caller held the last ref),
 * actual destruction is deferred past an RCU grace period via
 * ovsrcu_postpone(), so concurrent RCU readers of 'port' remain safe.
 *
 * NOTE(review): this is a diff fragment — the '-'/'+' pair records the
 * switch from ovs_refcount_unref() to the relaxed-memory-order variant.
 * The relaxed form is presumably safe here because ovsrcu_postpone()
 * already provides the needed ordering before the destructor runs —
 * confirm against lib/ovs-refcount.h in the tree this patch targets. */
static void
port_unref(struct dp_netdev_port *port)
{
- if (port && ovs_refcount_unref(&port->ref_cnt) == 1) {
+ if (port && ovs_refcount_unref_relaxed(&port->ref_cnt) == 1) {
ovsrcu_postpone(port_destroy__, port);
}
}
void
lacp_unref(struct lacp *lacp) OVS_EXCLUDED(mutex)
{
- if (lacp && ovs_refcount_unref(&lacp->ref_cnt) == 1) {
+ if (lacp && ovs_refcount_unref_relaxed(&lacp->ref_cnt) == 1) {
struct slave *slave, *next;
lacp_lock();
return;
}
- if (ovs_refcount_unref(&ms->ref_cnt) == 1) {
+ if (ovs_refcount_unref_relaxed(&ms->ref_cnt) == 1) {
mcast_snooping_flush(ms);
hmap_destroy(&ms->table);
ovs_rwlock_destroy(&ms->rwlock);
void
stp_unref(struct stp *stp)
{
- if (stp && ovs_refcount_unref(&stp->ref_cnt) == 1) {
+ if (stp && ovs_refcount_unref_relaxed(&stp->ref_cnt) == 1) {
ovs_mutex_lock(&mutex);
list_remove(&stp->node);
ovs_mutex_unlock(&mutex);
struct bond_slave *slave, *next_slave;
struct bond_pr_rule_op *pr_op, *next_op;
- if (!bond || ovs_refcount_unref(&bond->ref_cnt) != 1) {
+ if (!bond || ovs_refcount_unref_relaxed(&bond->ref_cnt) != 1) {
return;
}
void
netflow_unref(struct netflow *nf)
{
- if (nf && ovs_refcount_unref(&nf->ref_cnt) == 1) {
+ if (nf && ovs_refcount_unref_relaxed(&nf->ref_cnt) == 1) {
int orig;
atomic_sub(&netflow_count, 1, &orig);
void
dpif_ipfix_unref(struct dpif_ipfix *di) OVS_EXCLUDED(mutex)
{
- if (di && ovs_refcount_unref(&di->ref_cnt) == 1) {
+ if (di && ovs_refcount_unref_relaxed(&di->ref_cnt) == 1) {
ovs_mutex_lock(&mutex);
dpif_ipfix_clear(di);
dpif_ipfix_bridge_exporter_destroy(&di->bridge_exporter);
void
dpif_sflow_unref(struct dpif_sflow *ds) OVS_EXCLUDED(mutex)
{
- if (ds && ovs_refcount_unref(&ds->ref_cnt) == 1) {
+ if (ds && ovs_refcount_unref_relaxed(&ds->ref_cnt) == 1) {
struct dpif_sflow_port *dsp, *next;
route_table_unregister();
/* Releases one reference to 'rule' (NULL-safe).  If this was the last
 * reference (the unref call returns 1), frees the rule after an RCU
 * grace period by scheduling rule_destroy_cb() with ovsrcu_postpone(),
 * so threads still traversing RCU-protected structures that point at
 * 'rule' are not broken.
 *
 * NOTE(review): diff fragment — the '-'/'+' pair records the switch to
 * ovs_refcount_unref_relaxed(); relaxed ordering is presumably adequate
 * because the RCU quiescence before rule_destroy_cb() runs supplies the
 * release/acquire synchronization — verify against lib/ovs-refcount.h. */
void
ofproto_rule_unref(struct rule *rule)
{
- if (rule && ovs_refcount_unref(&rule->ref_count) == 1) {
+ if (rule && ovs_refcount_unref_relaxed(&rule->ref_count) == 1) {
ovsrcu_postpone(rule_destroy_cb, rule);
}
}