#include <linux/slab.h>
#include <linux/list.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/err.h>
struct kds_link resources[0];
};
-static DEFINE_MUTEX(kds_lock);
+static DEFINE_SPINLOCK(kds_lock);
int kds_callback_init(struct kds_callback *cb, int direct, kds_callback_fn user_cb)
{
struct kds_resource **resource_list)
{
struct kds_resource_set *rset = NULL;
+ unsigned long lflags;
int i;
int triggered;
int err = -EFAULT;
BUG_ON(!pprset);
BUG_ON(!resource_list);
BUG_ON(!cb);
-
- mutex_lock(&kds_lock);
-
- if ((flags & KDS_FLAG_LOCKED_ACTION) == KDS_FLAG_LOCKED_FAIL)
- {
- for (i = 0; i < number_resources; i++)
- {
- if (resource_list[i]->lock_count)
- {
- err = -EBUSY;
- goto errout;
- }
- }
- }
+ WARN_ON(number_resources > 10);
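+ /*
+ * Allocate the resource set before taking kds_lock: GFP_KERNEL may
+ * sleep, which is not allowed once the spinlock is held with
+ * interrupts disabled.
+ */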
rset = kmalloc(sizeof(*rset) + number_resources * sizeof(struct kds_link), GFP_KERNEL);
if (!rset)
{
- err = -ENOMEM;
- goto errout;
+ return -ENOMEM;
}
rset->num_resources = number_resources;
for (i = 0; i < number_resources; i++)
{
- unsigned long link_state = 0;
-
INIT_LIST_HEAD(&rset->resources[i].link);
rset->resources[i].parent = rset;
+ }
+
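+ /* kds_lock is IRQ-safe; lflags preserves the previous interrupt state. */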
+ spin_lock_irqsave(&kds_lock, lflags);
+
+ if ((flags & KDS_FLAG_LOCKED_ACTION) == KDS_FLAG_LOCKED_FAIL)
+ {
+ for (i = 0; i < number_resources; i++)
+ {
+ if (resource_list[i]->lock_count)
+ {
+ err = -EBUSY;
+ goto errout;
+ }
+ }
+ }
+
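+ /* Mark the resources requested for exclusive access in the bitmap. */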
+ for (i = 0; i < number_resources; i++)
+ {
+ unsigned long link_state = 0;
+
if (test_bit(i, exclusive_access_bitmap))
{
link_state |= KDS_LINK_EXCLUSIVE;
triggered = (rset->pending == 0);
- mutex_unlock(&kds_lock);
+ spin_unlock_irqrestore(&kds_lock, lflags);
/* set the pointer before the callback is called so the callback sees it */
*pprset = rset;
{
list_del(&rset->resources[i].link);
}
- kfree(rset);
err = -EINVAL;
errout:
- mutex_unlock(&kds_lock);
+ spin_unlock_irqrestore(&kds_lock, lflags);
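+ /* rset was unlinked above (or never linked), so it can be freed after dropping the lock. */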
+ kfree(rset);
return err;
}
EXPORT_SYMBOL(kds_async_waitall);
unsigned long jiffies_timeout)
{
struct kds_resource_set *rset;
+ unsigned long flags;
int i;
int triggered = 0;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
INIT_LIST_HEAD(&rset->callback_link);
INIT_WORK(&rset->callback_work, kds_queued_callback);
- mutex_lock(&kds_lock);
+ spin_lock_irqsave(&kds_lock, flags);
for (i = 0; i < number_resources; i++)
{
rset->callback_extra_parameter = NULL;
}
- mutex_unlock(&kds_lock);
+ spin_unlock_irqrestore(&kds_lock, flags);
if (!triggered)
{
resource_list[i]->lock_count--;
}
- mutex_unlock(&kds_lock);
+ spin_unlock_irqrestore(&kds_lock, flags);
kfree(rset);
return ERR_PTR(-EINVAL);
}
struct list_head triggered = LIST_HEAD_INIT(triggered);
struct kds_resource_set *rset;
struct kds_resource_set *it;
+ unsigned long flags;
int i;
BUG_ON(!pprset);
- mutex_lock(&kds_lock);
+ spin_lock_irqsave(&kds_lock, flags);
rset = *pprset;
if (!rset)
{
/* caught a race between a cancellation
 * and a completion, nothing to do */
- mutex_unlock(&kds_lock);
+ spin_unlock_irqrestore(&kds_lock, flags);
return;
}
}
- mutex_unlock(&kds_lock);
+ spin_unlock_irqrestore(&kds_lock, flags);
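+ /* Invoke the collected callbacks after dropping kds_lock. */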
while (!list_empty(&triggered))
{
kds_callback_perform(it);
}
- cancel_work_sync(&rset->callback_work);
+ /*
+ * The caller is responsible for guaranteeing that the callback work is
+ * not pending (i.e. it is running or has completed) prior to calling
+ * release. This should happen by default, since it is via the callback
+ * that we know the lock has been acquired. The one weird exception is
+ * using a non-direct callback together with the KDS_FLAG_LOCKED_IGNORE
+ * flag.
+ * TODO: we should probably disallow this combination of options.
+ */
+ BUG_ON(work_pending(&rset->callback_work));
/* free the resource set */
kfree(rset);