genirq: Expose interrupt information through sysfs
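This makes the core interrupt state visible in sysfs: each allocated
descriptor gets a kobject under /sys/kernel/irq/<irq>/ with read-only
attributes (per_cpu_count, chip_name, hwirq, type, name, actions). Unlike
/proc/interrupts, which covers online CPUs only and is geared towards
interrupts with installed actions, these files exist for every allocated
descriptor and report counts for every possible CPU.

A hedged illustration of the resulting layout (interrupt number and values
are hypothetical):

	/sys/kernel/irq/18/
	|-- actions        # "i8042" - installed handler names, comma-separated
	|-- chip_name      # "IO-APIC" - irq chip name; empty without a chip
	|-- hwirq          # "18" - hardware irq number; empty without a domain
	|-- name           # descriptor name (desc->name), if set
	|-- per_cpu_count  # "0,543,0,0" - one counter per possible CPU
	`-- type           # "level" or "edge"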
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0ccd028..93b5172 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/bitmap.h>
 #include <linux/irqdomain.h>
+#include <linux/sysfs.h>
 
 #include "internals.h"
 
@@ -68,9 +69,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
        return 0;
 }
 
-static void desc_smp_init(struct irq_desc *desc, int node)
+static void desc_smp_init(struct irq_desc *desc, int node,
+                         const struct cpumask *affinity)
 {
-       cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
+       if (!affinity)
+               affinity = irq_default_affinity;
+       cpumask_copy(desc->irq_common_data.affinity, affinity);
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
 #endif
@@ -82,11 +87,12 @@ static void desc_smp_init(struct irq_desc *desc, int node)
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
-static inline void desc_smp_init(struct irq_desc *desc, int node) { }
+static inline void
+desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
-               struct module *owner)
+                             const struct cpumask *affinity, struct module *owner)
 {
        int cpu;
 
@@ -107,7 +113,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
-       desc_smp_init(desc, node);
+       desc_smp_init(desc, node, affinity);
 }
 
 int nr_irqs = NR_IRQS;
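The affinity pointer threaded through desc_set_defaults() and
desc_smp_init() above is supplied by the allocation path changed further
down in this patch. A sketch of the resulting call chain (not part of the
patch; "mask" stands for an optional caller-supplied cpumask hint):

	int irq = __irq_alloc_descs(-1, 0, 1, node, THIS_MODULE, mask);
	/*
	 * -> alloc_descs(start, cnt, node, mask, owner)
	 * -> alloc_desc(irq, node, flags, mask, owner)
	 * -> desc_set_defaults(irq, desc, node, mask, owner)
	 * -> desc_smp_init(desc, node, mask), which ends up doing:
	 *        cpumask_copy(desc->irq_common_data.affinity,
	 *                     mask ? mask : irq_default_affinity);
	 */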
@@ -118,6 +124,181 @@ static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
+static void irq_kobj_release(struct kobject *kobj);
+
+#ifdef CONFIG_SYSFS
+static struct kobject *irq_kobj_base;
+
+#define IRQ_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+static ssize_t per_cpu_count_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr, char *buf)
+{
+       struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+       int cpu, irq = desc->irq_data.irq;
+       ssize_t ret = 0;
+       char *p = "";
+
+       for_each_possible_cpu(cpu) {
+               unsigned int c = kstat_irqs_cpu(irq, cpu);
+
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
+               p = ",";
+       }
+
+       ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+       return ret;
+}
+IRQ_ATTR_RO(per_cpu_count);
+
+static ssize_t chip_name_show(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+       ssize_t ret = 0;
+
+       raw_spin_lock_irq(&desc->lock);
+       if (desc->irq_data.chip && desc->irq_data.chip->name) {
+               ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+                               desc->irq_data.chip->name);
+       }
+       raw_spin_unlock_irq(&desc->lock);
+
+       return ret;
+}
+IRQ_ATTR_RO(chip_name);
+
+static ssize_t hwirq_show(struct kobject *kobj,
+                         struct kobj_attribute *attr, char *buf)
+{
+       struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+       ssize_t ret = 0;
+
+       raw_spin_lock_irq(&desc->lock);
+       if (desc->irq_data.domain)
+               ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
+       raw_spin_unlock_irq(&desc->lock);
+
+       return ret;
+}
+IRQ_ATTR_RO(hwirq);
+
+static ssize_t type_show(struct kobject *kobj,
+                        struct kobj_attribute *attr, char *buf)
+{
+       struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+       ssize_t ret = 0;
+
+       raw_spin_lock_irq(&desc->lock);
+       ret = sprintf(buf, "%s\n",
+                     irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
+       raw_spin_unlock_irq(&desc->lock);
+
+       return ret;
+}
+IRQ_ATTR_RO(type);
+
+static ssize_t name_show(struct kobject *kobj,
+                        struct kobj_attribute *attr, char *buf)
+{
+       struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+       ssize_t ret = 0;
+
+       raw_spin_lock_irq(&desc->lock);
+       if (desc->name)
+               ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
+       raw_spin_unlock_irq(&desc->lock);
+
+       return ret;
+}
+IRQ_ATTR_RO(name);
+
+static ssize_t actions_show(struct kobject *kobj,
+                           struct kobj_attribute *attr, char *buf)
+{
+       struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
+       struct irqaction *action;
+       ssize_t ret = 0;
+       char *p = "";
+
+       raw_spin_lock_irq(&desc->lock);
+       for (action = desc->action; action != NULL; action = action->next) {
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
+                                p, action->name);
+               p = ",";
+       }
+       raw_spin_unlock_irq(&desc->lock);
+
+       if (ret)
+               ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+       return ret;
+}
+IRQ_ATTR_RO(actions);
+
+static struct attribute *irq_attrs[] = {
+       &per_cpu_count_attr.attr,
+       &chip_name_attr.attr,
+       &hwirq_attr.attr,
+       &type_attr.attr,
+       &name_attr.attr,
+       &actions_attr.attr,
+       NULL
+};
+
+static struct kobj_type irq_kobj_type = {
+       .release        = irq_kobj_release,
+       .sysfs_ops      = &kobj_sysfs_ops,
+       .default_attrs  = irq_attrs,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc)
+{
+       if (irq_kobj_base) {
+               /*
+                * Continue even in case of failure, as this is not
+                * crucial.
+                */
+               if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
+                       pr_warn("Failed to add kobject for irq %d\n", irq);
+       }
+}
+
+static int __init irq_sysfs_init(void)
+{
+       struct irq_desc *desc;
+       int irq;
+
+       /* Prevent concurrent irq alloc/free */
+       irq_lock_sparse();
+
+       irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
+       if (!irq_kobj_base) {
+               irq_unlock_sparse();
+               return -ENOMEM;
+       }
+
+       /* Add the already allocated interrupts */
+       for_each_irq_desc(irq, desc)
+               irq_sysfs_add(irq, desc);
+       irq_unlock_sparse();
+
+       return 0;
+}
+postcore_initcall(irq_sysfs_init);
+
+#else /* !CONFIG_SYSFS */
+
+static struct kobj_type irq_kobj_type = {
+       .release        = irq_kobj_release,
+};
+
+static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+
+#endif /* CONFIG_SYSFS */
+
 static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
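For reference, IRQ_ATTR_RO() builds on the generic __ATTR_RO() helper, so
IRQ_ATTR_RO(hwirq) expands to roughly the following (a sketch; the exact
initializer lives in <linux/sysfs.h>):

	static struct kobj_attribute hwirq_attr = {
		.attr	= { .name = "hwirq", .mode = 0444 },	/* read-only */
		.show	= hwirq_show,
	};

kobj_sysfs_ops then dispatches every sysfs read on these files to the
->show() callback of the matching kobj_attribute, so no per-attribute
plumbing beyond the attribute table is needed.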
@@ -158,7 +339,9 @@ void irq_unlock_sparse(void)
        mutex_unlock(&sparse_irq_lock);
 }
 
-static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
+static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
+                                  const struct cpumask *affinity,
+                                  struct module *owner)
 {
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;
@@ -178,7 +361,9 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_rcu_head(&desc->rcu);
 
-       desc_set_defaults(irq, desc, node, owner);
+       desc_set_defaults(irq, desc, node, affinity, owner);
+       irqd_set(&desc->irq_data, flags);
+       kobject_init(&desc->kobj, &irq_kobj_type);
 
        return desc;
 
@@ -189,15 +374,22 @@ err_desc:
        return NULL;
 }
 
-static void delayed_free_desc(struct rcu_head *rhp)
+static void irq_kobj_release(struct kobject *kobj)
 {
-       struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+       struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
 
        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
 }
 
+static void delayed_free_desc(struct rcu_head *rhp)
+{
+       struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+
+       kobject_put(&desc->kobj);
+}
+
 static void free_desc(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
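Note the ownership change in the teardown path: delayed_free_desc() no
longer frees the descriptor itself, it only drops the kobject reference,
and the actual kfree() moves to irq_kobj_release(), which runs once any
concurrent sysfs users have dropped their references too. A minimal sketch
of this standard kobject release pattern (hypothetical struct my_desc, not
from the patch):

	static void my_release(struct kobject *kobj)
	{
		/* Runs when the last reference goes away. */
		kfree(container_of(kobj, struct my_desc, kobj));
	}

	static void my_delayed_free(struct rcu_head *rhp)
	{
		struct my_desc *d = container_of(rhp, struct my_desc, rcu);

		kobject_put(&d->kobj);	/* may invoke my_release() */
	}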
@@ -209,8 +401,12 @@ static void free_desc(unsigned int irq)
         * kstat_irq_usr(). Once we deleted the descriptor from the
         * sparse tree we can free it. Access in proc will fail to
         * lookup the descriptor.
+        *
+        * The sysfs entry must be serialized against a concurrent
+        * irq_sysfs_init() as well.
         */
        mutex_lock(&sparse_irq_lock);
+       kobject_del(&desc->kobj);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);
 
@@ -223,17 +419,37 @@ static void free_desc(unsigned int irq)
 }
 
 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
-                      struct module *owner)
+                      const struct cpumask *affinity, struct module *owner)
 {
+       const struct cpumask *mask = NULL;
        struct irq_desc *desc;
-       int i;
+       unsigned int flags;
+       int i, cpu = -1;
+
+       if (affinity && cpumask_empty(affinity))
+               return -EINVAL;
+
+       flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
 
        for (i = 0; i < cnt; i++) {
-               desc = alloc_desc(start + i, node, owner);
+               if (affinity) {
+                       cpu = cpumask_next(cpu, affinity);
+                       if (cpu >= nr_cpu_ids)
+                               cpu = cpumask_first(affinity);
+                       node = cpu_to_node(cpu);
+
+                       /*
+                        * For single allocations we use the caller-provided
+                        * mask, otherwise we use the mask of the target cpu.
+                        */
+                       mask = cnt == 1 ? affinity : cpumask_of(cpu);
+               }
+               desc = alloc_desc(start + i, node, flags, mask, owner);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
+               irq_sysfs_add(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;
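A worked example of the spreading loop above, assuming a hypothetical
affinity mask containing CPUs 2 and 5 and cnt == 3:

	/*
	 * i == 0: cpu = cpumask_next(-1, affinity) == 2 -> node of CPU 2
	 * i == 1: cpu = cpumask_next(2, affinity)  == 5 -> node of CPU 5
	 * i == 2: cpu = cpumask_next(5, affinity) >= nr_cpu_ids,
	 *         so cpu = cpumask_first(affinity) == 2 (wrap around)
	 *
	 * With cnt > 1 each descriptor's default affinity is narrowed to
	 * cpumask_of(cpu); a single allocation keeps the caller's full mask.
	 */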
@@ -277,7 +493,7 @@ int __init early_irq_init(void)
                nr_irqs = initcnt;
 
        for (i = 0; i < initcnt; i++) {
-               desc = alloc_desc(i, node, NULL);
+               desc = alloc_desc(i, node, 0, NULL, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
@@ -311,7 +527,7 @@ int __init early_irq_init(void)
                alloc_masks(&desc[i], GFP_KERNEL, node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-               desc_set_defaults(i, &desc[i], node, NULL);
+               desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
 }
@@ -328,11 +544,12 @@ static void free_desc(unsigned int irq)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
+       desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+                             const struct cpumask *affinity,
                              struct module *owner)
 {
        u32 i;
@@ -453,12 +670,15 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
  * @cnt:       Number of consecutive irqs to allocate.
  * @node:      Preferred node on which the irq descriptor should be allocated
  * @owner:     Owning module (can be NULL)
+ * @affinity:  Optional pointer to an affinity mask which hints where the
+ *             irq descriptors should be allocated and which default
+ *             affinities to use
  *
  * Returns the first irq number or error code
  */
 int __ref
 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
-                 struct module *owner)
+                 struct module *owner, const struct cpumask *affinity)
 {
        int start, ret;
 
@@ -494,7 +714,7 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 
        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
-       return alloc_descs(start, cnt, node, owner);
+       return alloc_descs(start, cnt, node, affinity, owner);
 
 err:
        mutex_unlock(&sparse_irq_lock);
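A hedged usage sketch of the extended allocator (hypothetical driver code;
the two-CPU hint is arbitrary). Passing a non-NULL mask also marks the
descriptors IRQD_AFFINITY_MANAGED, per alloc_descs() above:

	cpumask_var_t hint;
	int irq;

	if (!zalloc_cpumask_var(&hint, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(0, hint);
	cpumask_set_cpu(1, hint);

	/* Two descriptors, spread across (and defaulting to) CPUs 0 and 1 */
	irq = __irq_alloc_descs(-1, 0, 2, NUMA_NO_NODE, THIS_MODULE, hint);
	if (irq < 0)
		pr_err("irq descriptor allocation failed: %d\n", irq);
	free_cpumask_var(hint);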
@@ -512,7 +732,7 @@ EXPORT_SYMBOL_GPL(__irq_alloc_descs);
  */
 unsigned int irq_alloc_hwirqs(int cnt, int node)
 {
-       int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
+       int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);
 
        if (irq < 0)
                return 0;
@@ -595,7 +815,8 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
                chip_bus_sync_unlock(desc);
 }
 
-int irq_set_percpu_devid(unsigned int irq)
+int irq_set_percpu_devid_partition(unsigned int irq,
+                                  const struct cpumask *affinity)
 {
        struct irq_desc *desc = irq_to_desc(irq);
 
@@ -610,10 +831,33 @@ int irq_set_percpu_devid(unsigned int irq)
        if (!desc->percpu_enabled)
                return -ENOMEM;
 
+       if (affinity)
+               desc->percpu_affinity = affinity;
+       else
+               desc->percpu_affinity = cpu_possible_mask;
+
        irq_set_percpu_devid_flags(irq);
        return 0;
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+       return irq_set_percpu_devid_partition(irq, NULL);
+}
+
+int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (!desc || !desc->percpu_enabled)
+               return -EINVAL;
+
+       if (affinity)
+               cpumask_copy(affinity, desc->percpu_affinity);
+
+       return 0;
+}
+
 void kstat_incr_irq_this_cpu(unsigned int irq)
 {
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
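Finally, a hedged usage sketch for the new percpu partition query
(hypothetical irq number; the call fails with -EINVAL unless the interrupt
is percpu_devid):

	cpumask_var_t part;

	if (!zalloc_cpumask_var(&part, GFP_KERNEL))
		return -ENOMEM;
	if (!irq_get_percpu_devid_partition(irq, part))
		pr_info("irq %u is per-cpu on %*pbl\n", irq,
			cpumask_pr_args(part));
	free_cpumask_var(part);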