1 /* The industrial I/O core, trigger handling functions
3 * Copyright (c) 2008 Jonathan Cameron
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
10 #include <linux/kernel.h>
11 #include <linux/idr.h>
12 #include <linux/err.h>
13 #include <linux/device.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/slab.h>
18 #include <linux/iio/iio.h>
19 #include <linux/iio/trigger.h>
21 #include "iio_core_trigger.h"
22 #include <linux/iio/trigger_consumer.h>
24 /* RFC - Question of approach
25 * Make the common case (single sensor single trigger)
26 * simple by starting trigger capture from when first sensors
29 * Complex simultaneous start requires use of 'hold' functionality
30 * of the trigger. (not implemented)
32 * Any other suggestions?
/* IDA handing out the unique numeric id embedded in each "triggerN" name */
35 static DEFINE_IDA(iio_trigger_ida);
37 /* Single list of all available triggers */
38 static LIST_HEAD(iio_trigger_list);
/* Serialises register/unregister/lookup walks over iio_trigger_list */
39 static DEFINE_MUTEX(iio_trigger_list_lock);
42 * iio_trigger_read_name() - retrieve useful identifying name
43 * @dev: device associated with the iio_trigger
44 * @attr: pointer to the device_attribute structure that is
46 * @buf: buffer to print the name into
48 * Return: a negative number on failure or the number of written
49 * characters on success.
/* sysfs 'name' show callback: prints the trigger's name plus a newline */
51 static ssize_t iio_trigger_read_name(struct device *dev,
52 struct device_attribute *attr,
55 struct iio_trigger *trig = to_iio_trigger(dev);
56 return sprintf(buf, "%s\n", trig->name);
/* Read-only 'name' attribute present on every trigger device */
59 static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
/*
 * Default sysfs attributes of a trigger device.  ATTRIBUTE_GROUPS()
 * generates iio_trig_dev_groups, referenced by iio_trig_type below.
 */
61 static struct attribute *iio_trig_dev_attrs[] = {
65 ATTRIBUTE_GROUPS(iio_trig_dev);
/*
 * iio_trigger_register() - make a trigger available to the IIO core.
 * Allocates a unique id, names the device "trigger<id>", adds it to the
 * device model and appends it to the global trigger list.
 * Return: 0 on success, negative errno on failure (NOTE(review): the
 * early-return/error paths are not fully visible here - confirm).
 */
67 int iio_trigger_register(struct iio_trigger *trig_info)
71 /* trig_info->ops is required for the module member */
75 trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
76 if (trig_info->id < 0)
79 /* Set the name used for the sysfs directory etc */
80 dev_set_name(&trig_info->dev, "trigger%ld",
81 (unsigned long) trig_info->id);
83 ret = device_add(&trig_info->dev);
85 goto error_unregister_id;
87 /* Add to list of available triggers held by the IIO core */
88 mutex_lock(&iio_trigger_list_lock);
89 list_add_tail(&trig_info->list, &iio_trigger_list);
90 mutex_unlock(&iio_trigger_list_lock);
/* Unwind: give the id back if device_add() failed */
95 ida_simple_remove(&iio_trigger_ida, trig_info->id);
98 EXPORT_SYMBOL(iio_trigger_register);
/*
 * iio_trigger_unregister() - reverse of iio_trigger_register():
 * drop the trigger from the global list, release its id and remove
 * the device from the device model.
 */
100 void iio_trigger_unregister(struct iio_trigger *trig_info)
102 mutex_lock(&iio_trigger_list_lock);
103 list_del(&trig_info->list);
104 mutex_unlock(&iio_trigger_list_lock);
106 ida_simple_remove(&iio_trigger_ida, trig_info->id);
107 /* Possible issue in here */
108 device_del(&trig_info->dev);
110 EXPORT_SYMBOL(iio_trigger_unregister);
/*
 * Linear search of the global trigger list, under the list lock.
 * sysfs_streq() is used so a trailing newline from a sysfs write still
 * matches.  NOTE(review): return path elided - presumably returns the
 * matching trigger or the initial NULL; confirm against full source.
 */
112 static struct iio_trigger *iio_trigger_find_by_name(const char *name,
115 struct iio_trigger *trig = NULL, *iter;
117 mutex_lock(&iio_trigger_list_lock);
118 list_for_each_entry(iter, &iio_trigger_list, list)
119 if (sysfs_streq(iter->name, name)) {
123 mutex_unlock(&iio_trigger_list_lock);
/*
 * iio_trigger_poll() - fan a trigger event out to all enabled consumers.
 * use_count is preloaded with the maximum consumer count; each consumer
 * drops it via iio_trigger_notify_done() (disabled slots are dropped
 * immediately below).  Uses generic_handle_irq(), so this runs in hard
 * interrupt context.
 * NOTE(review): the read-then-set on use_count is not a single atomic
 * operation; upstream later replaced this with atomic_cmpxchg() to close
 * a race - confirm whether that fix applies here.
 */
128 void iio_trigger_poll(struct iio_trigger *trig)
132 if (!atomic_read(&trig->use_count)) {
133 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
135 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
136 if (trig->subirqs[i].enabled)
137 generic_handle_irq(trig->subirq_base + i);
/* Balance the preloaded count for this disabled consumer slot */
139 iio_trigger_notify_done(trig);
143 EXPORT_SYMBOL(iio_trigger_poll);
/*
 * Generic hard-irq handler for data-ready style triggers: simply polls
 * the trigger passed as the irq cookie.
 */
145 irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
147 iio_trigger_poll(private);
150 EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
/*
 * Variant of iio_trigger_poll() for use from thread context: dispatches
 * via handle_nested_irq() instead of generic_handle_irq().  Same
 * use_count accounting scheme (and the same read-then-set caveat) as
 * iio_trigger_poll().
 */
152 void iio_trigger_poll_chained(struct iio_trigger *trig)
156 if (!atomic_read(&trig->use_count)) {
157 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
159 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
160 if (trig->subirqs[i].enabled)
161 handle_nested_irq(trig->subirq_base + i);
163 iio_trigger_notify_done(trig);
167 EXPORT_SYMBOL(iio_trigger_poll_chained);
/*
 * Called by each consumer when it has finished with a trigger event.
 * When the last consumer is done, optionally re-enable the trigger via
 * ops->try_reenable(); a non-zero return means an event was missed while
 * disabled, so poll again immediately.
 */
169 void iio_trigger_notify_done(struct iio_trigger *trig)
171 if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
172 if (trig->ops->try_reenable(trig))
173 /* Missed an interrupt so launch new poll now */
174 iio_trigger_poll(trig);
176 EXPORT_SYMBOL(iio_trigger_notify_done);
178 /* Trigger Consumer related functions */
/*
 * Reserve one sub-irq slot from the trigger's consumer pool and convert
 * the bitmap index to an irq number by adding subirq_base.
 * NOTE(review): the failure path for a full pool is elided here - confirm
 * a negative bitmap_find_free_region() result is returned unmodified.
 */
179 static int iio_trigger_get_irq(struct iio_trigger *trig)
182 mutex_lock(&trig->pool_lock);
183 ret = bitmap_find_free_region(trig->pool,
184 CONFIG_IIO_CONSUMERS_PER_TRIGGER,
186 mutex_unlock(&trig->pool_lock);
188 ret += trig->subirq_base;
/* Return a sub-irq slot to the trigger's consumer pool (inverse of get_irq) */
193 static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
195 mutex_lock(&trig->pool_lock);
196 clear_bit(irq - trig->subirq_base, trig->pool);
197 mutex_unlock(&trig->pool_lock);
200 /* Complexity in here. With certain triggers (datardy) an acknowledgement
201 * may be needed if the pollfuncs do not include the data read for the
203 * This is not currently handled. Alternative of not enabling trigger unless
204 * the relevant function is in there may be the best option.
206 /* Worth protecting against double additions? */
/*
 * Attach a consumer poll function to a trigger: pin the consumer driver's
 * module, reserve a sub-irq, install the threaded handler, and if this is
 * the first consumer (pool was empty) turn the trigger on via
 * ops->set_trigger_state(trig, true).  Failure paths drop the module
 * reference (NOTE(review): irq-release unwinding is elided here - confirm).
 */
207 static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
208 struct iio_poll_func *pf)
212 = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
214 /* Prevent the module from being removed whilst attached to a trigger */
215 __module_get(pf->indio_dev->info->driver_module);
216 pf->irq = iio_trigger_get_irq(trig);
217 ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
221 module_put(pf->indio_dev->info->driver_module);
/* First consumer: enable the underlying trigger hardware */
225 if (trig->ops->set_trigger_state && notinuse) {
226 ret = trig->ops->set_trigger_state(trig, true);
228 module_put(pf->indio_dev->info->driver_module);
/*
 * Detach a consumer poll function: if this was the last user (pool weight
 * about to reach zero) turn the trigger off first, then release the
 * sub-irq, free the handler and unpin the consumer driver's module.
 */
234 static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
235 struct iio_poll_func *pf)
239 = (bitmap_weight(trig->pool,
240 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
242 if (trig->ops->set_trigger_state && no_other_users) {
243 ret = trig->ops->set_trigger_state(trig, false);
247 iio_trigger_put_irq(trig, pf->irq);
248 free_irq(pf->irq, pf);
249 module_put(pf->indio_dev->info->driver_module);
/*
 * Common top-half for poll functions: record the timestamp as close to
 * the trigger event as possible, then wake the threaded handler.
 */
254 irqreturn_t iio_pollfunc_store_time(int irq, void *p)
256 struct iio_poll_func *pf = p;
257 pf->timestamp = iio_get_time_ns();
258 return IRQ_WAKE_THREAD;
260 EXPORT_SYMBOL(iio_pollfunc_store_time);
/*
 * Allocate and populate an iio_poll_func: top-half h, threaded handler
 * 'thread', owning indio_dev, and a printf-formatted name.
 * Returns NULL on allocation failure (NOTE(review): the kmalloc-failure
 * and name-failure return paths are elided here - confirm cleanup frees
 * pf before returning NULL).
 */
263 *iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
264 irqreturn_t (*thread)(int irq, void *p),
266 struct iio_dev *indio_dev,
271 struct iio_poll_func *pf;
273 pf = kmalloc(sizeof *pf, GFP_KERNEL);
276 va_start(vargs, fmt);
277 pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
279 if (pf->name == NULL) {
286 pf->indio_dev = indio_dev;
290 EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);
/*
 * Free an iio_poll_func allocated by iio_alloc_pollfunc().
 * NOTE(review): body elided in this view - presumably kfree()s pf->name
 * and pf; confirm against full source.
 */
292 void iio_dealloc_pollfunc(struct iio_poll_func *pf)
297 EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
300 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
301 * @dev: device associated with an industrial I/O device
302 * @attr: pointer to the device_attribute structure that
304 * @buf: buffer where the current trigger name will be printed into
306 * For trigger consumers the current_trigger interface allows the trigger
307 * used by the device to be queried.
309 * Return: a negative number on failure, the number of characters written
310 * on success or 0 if no trigger is available
/* sysfs current_trigger show: prints the attached trigger's name */
312 static ssize_t iio_trigger_read_current(struct device *dev,
313 struct device_attribute *attr,
316 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
/* NOTE(review): the no-trigger (indio_dev->trig == NULL) early return is
 * elided in this view - confirm it returns 0 before this dereference. */
319 return sprintf(buf, "%s\n", indio_dev->trig->name);
324 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
325 * @dev: device associated with an industrial I/O device
326 * @attr: device attribute that is being processed
327 * @buf: string buffer that holds the name of the trigger
328 * @len: length of the trigger name held by buf
330 * For trigger consumers the current_trigger interface allows the trigger
331 * used for this device to be specified at run time based on the trigger's
334 * Return: negative error code on failure or length of the buffer
/*
 * sysfs current_trigger store: switch the device to the trigger named in
 * buf.  Refuses while buffered-triggered capture is running, validates
 * the new trigger in both directions (device validates trigger, trigger
 * validates device), then swaps references: detach/put the old trigger,
 * get/attach the new one (event poll funcs only for INDIO_EVENT_TRIGGERED
 * devices).  NOTE(review): error returns between the validation steps are
 * elided in this view - confirm each failure path before relying on them.
 */
337 static ssize_t iio_trigger_write_current(struct device *dev,
338 struct device_attribute *attr,
342 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
343 struct iio_trigger *oldtrig = indio_dev->trig;
344 struct iio_trigger *trig;
/* Can't change trigger while a triggered buffer capture is active */
347 mutex_lock(&indio_dev->mlock);
348 if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
349 mutex_unlock(&indio_dev->mlock);
352 mutex_unlock(&indio_dev->mlock);
354 trig = iio_trigger_find_by_name(buf, len);
/* Device-side veto of the proposed trigger */
358 if (trig && indio_dev->info->validate_trigger) {
359 ret = indio_dev->info->validate_trigger(indio_dev, trig);
/* Trigger-side veto of the proposed device */
364 if (trig && trig->ops->validate_device) {
365 ret = trig->ops->validate_device(trig, indio_dev);
370 indio_dev->trig = trig;
/* Release the previous trigger's event hookup and reference */
373 if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
374 iio_trigger_detach_poll_func(oldtrig,
375 indio_dev->pollfunc_event);
376 iio_trigger_put(oldtrig);
/* Take a reference on the new trigger and hook up event polling */
378 if (indio_dev->trig) {
379 iio_trigger_get(indio_dev->trig);
380 if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
381 iio_trigger_attach_poll_func(indio_dev->trig,
382 indio_dev->pollfunc_event);
/* Read/write 'current_trigger' attribute for trigger-consumer devices */
388 static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
389 iio_trigger_read_current,
390 iio_trigger_write_current);
392 static struct attribute *iio_trigger_consumer_attrs[] = {
393 &dev_attr_current_trigger.attr,
/* Group installed on consumer devices by
 * iio_device_register_trigger_consumer() below */
397 static const struct attribute_group iio_trigger_consumer_attr_group = {
399 .attrs = iio_trigger_consumer_attrs,
/*
 * Device-model release callback for a trigger: tear down the per-consumer
 * sub-irq descriptors allocated in viio_trigger_alloc() and free them.
 * NOTE(review): the final kfree of trig/name is elided in this view -
 * confirm against full source.
 */
402 static void iio_trig_release(struct device *device)
404 struct iio_trigger *trig = to_iio_trigger(device);
407 if (trig->subirq_base) {
408 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
/* Undo the status/chip/handler setup from viio_trigger_alloc() */
409 irq_modify_status(trig->subirq_base + i,
411 IRQ_NOREQUEST | IRQ_NOPROBE);
412 irq_set_chip(trig->subirq_base + i,
414 irq_set_handler(trig->subirq_base + i,
418 irq_free_descs(trig->subirq_base,
419 CONFIG_IIO_CONSUMERS_PER_TRIGGER);
/* Device type shared by all triggers: release hook plus default sysfs groups */
425 static struct device_type iio_trig_type = {
426 .release = iio_trig_release,
427 .groups = iio_trig_dev_groups,
/*
 * irq_chip mask callback for a trigger's sub-irqs: recover the owning
 * trigger via container_of on the chip and mark the consumer slot
 * disabled, so iio_trigger_poll() skips it.
 */
430 static void iio_trig_subirqmask(struct irq_data *d)
432 struct irq_chip *chip = irq_data_get_irq_chip(d);
433 struct iio_trigger *trig
435 struct iio_trigger, subirq_chip);
436 trig->subirqs[d->irq - trig->subirq_base].enabled = false;
/* irq_chip unmask callback: re-enable the consumer slot for polling */
439 static void iio_trig_subirqunmask(struct irq_data *d)
441 struct irq_chip *chip = irq_data_get_irq_chip(d);
442 struct iio_trigger *trig
444 struct iio_trigger, subirq_chip);
445 trig->subirqs[d->irq - trig->subirq_base].enabled = true;
/*
 * Core allocator behind iio_trigger_alloc()/devm_iio_trigger_alloc():
 * zero-allocate the trigger, initialise its device, allocate a block of
 * CONFIG_IIO_CONSUMERS_PER_TRIGGER irq descriptors for consumers, build
 * the vsnprintf-formatted name, wire up the mask/unmask irq_chip, and
 * configure each sub-irq.  Takes an extra device reference before
 * returning.  NOTE(review): several error-cleanup lines are elided in
 * this view - confirm kfree/irq_free_descs ordering against full source.
 */
448 static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
450 struct iio_trigger *trig;
451 trig = kzalloc(sizeof *trig, GFP_KERNEL);
454 trig->dev.type = &iio_trig_type;
455 trig->dev.bus = &iio_bus_type;
456 device_initialize(&trig->dev);
458 mutex_init(&trig->pool_lock);
/* One irq descriptor per potential consumer of this trigger */
460 = irq_alloc_descs(-1, 0,
461 CONFIG_IIO_CONSUMERS_PER_TRIGGER,
463 if (trig->subirq_base < 0) {
468 trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
469 if (trig->name == NULL) {
470 irq_free_descs(trig->subirq_base,
471 CONFIG_IIO_CONSUMERS_PER_TRIGGER);
475 trig->subirq_chip.name = trig->name;
476 trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
477 trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
478 for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
479 irq_set_chip(trig->subirq_base + i,
481 irq_set_handler(trig->subirq_base + i,
483 irq_modify_status(trig->subirq_base + i,
484 IRQ_NOREQUEST | IRQ_NOAUTOEN,
487 get_device(&trig->dev);
/*
 * Public varargs wrapper around viio_trigger_alloc(); the format string
 * and arguments build the trigger name.
 */
493 struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
495 struct iio_trigger *trig;
498 va_start(vargs, fmt);
499 trig = viio_trigger_alloc(fmt, vargs);
504 EXPORT_SYMBOL(iio_trigger_alloc);
/*
 * Drop the caller's reference; actual teardown happens in
 * iio_trig_release() once the last reference goes away.
 */
506 void iio_trigger_free(struct iio_trigger *trig)
509 put_device(&trig->dev);
511 EXPORT_SYMBOL(iio_trigger_free);
/* devres release callback: free the managed trigger stored in *res */
513 static void devm_iio_trigger_release(struct device *dev, void *res)
515 iio_trigger_free(*(struct iio_trigger **)res);
/*
 * devres match callback: true when the resource holds the trigger
 * pointer passed as data.  NOTE(review): comparison line elided in this
 * view - confirm it compares *r against data.
 */
518 static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
520 struct iio_trigger **r = res;
531 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
532 * @dev: Device to allocate iio_trigger for
533 * @fmt: trigger name format. If it includes format
534 * specifiers, the additional arguments following
535 * format are formatted and inserted in the resulting
536 * string replacing their respective specifiers.
538 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
539 * automatically freed on driver detach.
541 * If an iio_trigger allocated with this function needs to be freed separately,
542 * devm_iio_trigger_free() must be used.
545 * Pointer to allocated iio_trigger on success, NULL on failure.
/*
 * Resource-managed trigger allocation: allocate a devres slot holding the
 * trigger pointer, allocate the trigger itself, then register the slot so
 * the trigger is freed automatically on driver detach.
 */
547 struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
548 const char *fmt, ...)
550 struct iio_trigger **ptr, *trig;
553 ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
558 /* use raw alloc_dr for kmalloc caller tracing */
559 va_start(vargs, fmt);
560 trig = viio_trigger_alloc(fmt, vargs);
564 devres_add(dev, ptr);
571 EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
574 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
575 * @dev: Device this iio_dev belongs to
576 * @iio_trig: the iio_trigger associated with the device
578 * Free iio_trigger allocated with devm_iio_trigger_alloc().
/*
 * Explicitly release a managed trigger before driver detach; devres
 * finds the matching resource and runs devm_iio_trigger_release() on it.
 */
580 void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
584 rc = devres_release(dev, devm_iio_trigger_release,
585 devm_iio_trigger_match, iio_trig);
588 EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
/* Expose the current_trigger sysfs group on a trigger-consuming device */
590 void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
592 indio_dev->groups[indio_dev->groupcounter++] =
593 &iio_trigger_consumer_attr_group;
/* Drop the reference taken on a trigger that was assigned but never attached */
596 void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
598 /* Clean up an associated but not attached trigger reference */
600 iio_trigger_put(indio_dev->trig);
/*
 * Buffer postenable helper for triggered-buffer drivers: attach the
 * device's poll function to its current trigger.
 */
603 int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
605 return iio_trigger_attach_poll_func(indio_dev->trig,
606 indio_dev->pollfunc);
608 EXPORT_SYMBOL(iio_triggered_buffer_postenable);
/*
 * Buffer predisable helper: detach the device's poll function from its
 * current trigger (inverse of iio_triggered_buffer_postenable()).
 */
610 int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
612 return iio_trigger_detach_poll_func(indio_dev->trig,
613 indio_dev->pollfunc);
615 EXPORT_SYMBOL(iio_triggered_buffer_predisable);