}
#ifdef IP_SET_HASH_WITH_NETS
+#ifdef IP_SET_HASH_WITH_NETS_PACKED
+/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
+#define CIDR(cidr) ((cidr) + 1)
+#else
+#define CIDR(cidr) (cidr)
+#endif
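+/* Example (assumed layout, as in hash:net,port): such an entry keeps
+ * "u8 cidr:7, nomatch:1", where a full host mask (32 or 128) would not
+ * fit, so cidr - 1 is stored and CIDR() restores the real prefix
+ * length at every use. */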
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
#define type_pf_data_list TOKEN(TYPE, PF, _data_list)
#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
#define type_pf_data_next TOKEN(TYPE, PF, _data_next)
+#define type_pf_data_flags TOKEN(TYPE, PF, _data_flags)
+#ifdef IP_SET_HASH_WITH_NETS
+#define type_pf_data_match TOKEN(TYPE, PF, _data_match)
+#else
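+/* Without nets there is no nomatch flag: a found entry always matches */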
+#define type_pf_data_match(d) 1
+#endif
#define type_pf_elem TOKEN(TYPE, PF, _elem)
#define type_pf_telem TOKEN(TYPE, PF, _telem)
* we spare the maintenance of the internal counters. */
static int
type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value,
- u8 ahash_max)
+ u8 ahash_max, u32 cadt_flags)
{
+ struct type_pf_elem *data;
+
if (n->pos >= n->size) {
void *tmp;
n->value = tmp;
n->size += AHASH_INIT_SIZE;
}
- type_pf_data_copy(ahash_data(n, n->pos++), value);
+ data = ahash_data(n, n->pos++);
+ type_pf_data_copy(data, value);
+#ifdef IP_SET_HASH_WITH_NETS
+ /* Resizing won't overwrite stored flags */
+ if (cadt_flags)
+ type_pf_data_flags(data, cadt_flags);
+#endif
return 0;
}
htable_bits++;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
- if (!htable_bits)
+ if (!htable_bits) {
/* In case we have plenty of memory :-) */
+ pr_warning("Cannot increase the hashsize of set %s further\n",
+ set->name);
return -IPSET_ERR_HASH_FULL;
+ }
t = ip_set_alloc(sizeof(*t)
+ jhash_size(htable_bits) * sizeof(struct hbucket));
if (!t)
for (j = 0; j < n->pos; j++) {
data = ahash_data(n, j);
m = hbucket(t, HKEY(data, h->initval, htable_bits));
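+ /* Pass cadt_flags 0: the rehashed entry was copied with its flags
+ * and they must not be overwritten */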
- ret = type_pf_elem_add(m, data, AHASH_MAX(h));
+ ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0);
if (ret < 0) {
read_unlock_bh(&set->lock);
ahash_destroy(t);
struct hbucket *n;
int i, ret = 0;
u32 key, multi = 0;
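+ /* The cadt flags (e.g. nomatch) are carried in the upper 16 bits */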
+ u32 cadt_flags = flags >> 16;
- if (h->elements >= h->maxelem)
+ if (h->elements >= h->maxelem) {
+ if (net_ratelimit())
+ pr_warning("Set %s is full, maxelem %u reached\n",
+ set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
+ }
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++)
if (type_pf_data_equal(ahash_data(n, i), d, &multi)) {
+#ifdef IP_SET_HASH_WITH_NETS
+ if (flags & IPSET_FLAG_EXIST)
+ /* Support overwriting just the flags */
+ type_pf_data_flags(ahash_data(n, i),
+ cadt_flags);
+#endif
ret = -IPSET_ERR_EXIST;
goto out;
}
TUNE_AHASH_MAX(h, multi);
- ret = type_pf_elem_add(n, value, AHASH_MAX(h));
+ ret = type_pf_elem_add(n, value, AHASH_MAX(h), cadt_flags);
if (ret != 0) {
if (ret == -EAGAIN)
type_pf_data_next(h, d);
}
#ifdef IP_SET_HASH_WITH_NETS
- add_cidr(h, d->cidr, HOST_MASK);
+ add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
h->elements++;
out:
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, d->cidr, HOST_MASK);
+ del_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i);
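+ /* type_pf_data_match() returns 0 for an entry flagged as nomatch */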
if (type_pf_data_equal(data, d, &multi))
- return 1;
+ return type_pf_data_match(data);
}
}
return 0;
#ifdef IP_SET_HASH_WITH_NETS
/* If we test an IP address and not a network address,
* try all possible network sizes */
- if (d->cidr == SET_HOST_MASK(set->family))
+ if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_test_cidrs(set, d, timeout);
#endif
for (i = 0; i < n->pos; i++) {
data = ahash_data(n, i);
if (type_pf_data_equal(data, d, &multi))
- return 1;
+ return type_pf_data_match(data);
}
return 0;
}
static int
type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
- u8 ahash_max, u32 timeout)
+ u8 ahash_max, u32 cadt_flags, u32 timeout)
{
struct type_pf_elem *data;
data = ahash_tdata(n, n->pos++);
type_pf_data_copy(data, value);
type_pf_data_timeout_set(data, timeout);
+#ifdef IP_SET_HASH_WITH_NETS
+ /* Resizing won't overwrite stored flags */
+ if (cadt_flags)
+ type_pf_data_flags(data, cadt_flags);
+#endif
return 0;
}
if (type_pf_data_expired(data)) {
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, data->cidr, HOST_MASK);
+ del_cidr(h, CIDR(data->cidr), HOST_MASK);
#endif
if (j != n->pos - 1)
/* Not last one */
retry:
ret = 0;
htable_bits++;
- if (!htable_bits)
+ if (!htable_bits) {
/* In case we have plenty of memory :-) */
+ pr_warning("Cannot increase the hashsize of set %s further\n",
+ set->name);
return -IPSET_ERR_HASH_FULL;
+ }
t = ip_set_alloc(sizeof(*t)
+ jhash_size(htable_bits) * sizeof(struct hbucket));
if (!t)
for (j = 0; j < n->pos; j++) {
data = ahash_tdata(n, j);
m = hbucket(t, HKEY(data, h->initval, htable_bits));
- ret = type_pf_elem_tadd(m, data, AHASH_MAX(h),
+ ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
type_pf_data_timeout(data));
if (ret < 0) {
read_unlock_bh(&set->lock);
int ret = 0, i, j = AHASH_MAX(h) + 1;
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0;
+ u32 cadt_flags = flags >> 16;
if (h->elements >= h->maxelem)
/* FIXME: when set is full, we slow down here */
type_pf_expire(h);
- if (h->elements >= h->maxelem)
+ if (h->elements >= h->maxelem) {
+ if (net_ratelimit())
+ pr_warning("Set %s is full, maxelem %u reached\n",
+ set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
+ }
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
data = ahash_tdata(n, i);
if (type_pf_data_equal(data, d, &multi)) {
if (type_pf_data_expired(data) || flag_exist)
+ /* Just the timeout and flags may be updated */
j = i;
else {
ret = -IPSET_ERR_EXIST;
if (j != AHASH_MAX(h) + 1) {
data = ahash_tdata(n, j);
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, data->cidr, HOST_MASK);
- add_cidr(h, d->cidr, HOST_MASK);
+ del_cidr(h, CIDR(data->cidr), HOST_MASK);
+ add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
type_pf_data_copy(data, d);
type_pf_data_timeout_set(data, timeout);
+#ifdef IP_SET_HASH_WITH_NETS
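+ /* Overwrite the stored flags with the ones from the request */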
+ type_pf_data_flags(data, cadt_flags);
+#endif
goto out;
}
TUNE_AHASH_MAX(h, multi);
- ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), timeout);
+ ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), cadt_flags, timeout);
if (ret != 0) {
if (ret == -EAGAIN)
type_pf_data_next(h, d);
}
#ifdef IP_SET_HASH_WITH_NETS
- add_cidr(h, d->cidr, HOST_MASK);
+ add_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
h->elements++;
out:
n->pos--;
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
- del_cidr(h, d->cidr, HOST_MASK);
+ del_cidr(h, CIDR(d->cidr), HOST_MASK);
#endif
if (n->pos + AHASH_INIT_SIZE < n->size) {
void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
- if (type_pf_data_equal(data, d, &multi))
- return !type_pf_data_expired(data);
+ if (type_pf_data_equal(data, d, &multi) &&
+ !type_pf_data_expired(data))
+ return type_pf_data_match(data);
}
}
return 0;
u32 key, multi = 0;
#ifdef IP_SET_HASH_WITH_NETS
- if (d->cidr == SET_HOST_MASK(set->family))
+ if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
return type_pf_ttest_cidrs(set, d, timeout);
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
for (i = 0; i < n->pos; i++) {
data = ahash_tdata(n, i);
- if (type_pf_data_equal(data, d, &multi))
- return !type_pf_data_expired(data);
+ if (type_pf_data_equal(data, d, &multi) &&
+ !type_pf_data_expired(data))
+ return type_pf_data_match(data);
}
return 0;
}
#undef type_pf_data_isnull
#undef type_pf_data_copy
#undef type_pf_data_zero_out
+#undef type_pf_data_netmask
#undef type_pf_data_list
#undef type_pf_data_tlist
+#undef type_pf_data_next
+#undef type_pf_data_flags
+#undef type_pf_data_match
#undef type_pf_elem
#undef type_pf_telem
#undef type_pf_data_timeout
#undef type_pf_data_expired
-#undef type_pf_data_netmask
#undef type_pf_data_timeout_set
#undef type_pf_elem_add
#undef type_pf_test
#undef type_pf_elem_tadd
+#undef type_pf_del_telem
#undef type_pf_expire
#undef type_pf_tadd
#undef type_pf_tdel