/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 *      irq_set_chip - set the irq chip for an irq
 *      @irq:   irq number
 *      @chip:  pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;

        if (!chip)
                chip = &no_irq_chip;

        desc->irq_data.chip = chip;
        irq_put_desc_unlock(desc, flags);
        /*
         * For !CONFIG_SPARSE_IRQ make the irq show up in
         * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
         * already marked, and this call is harmless.
         */
        irq_reserve_irq(irq);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip);
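
/*
 * Illustrative sketch (not part of this file): controller setup code
 * normally installs its chip on a freshly allocated descriptor before any
 * handler can run. "virq" and "foo_chip" are made-up names; the call is
 * the real API:
 *
 *      irq_set_chip(virq, &foo_chip);
 *
 * Passing a NULL chip resets the descriptor to no_irq_chip, as the code
 * above shows. Most callers use irq_set_chip_and_handler(), a wrapper
 * around irq_set_chip_and_handler_name() further down in this file, to
 * set the chip and the flow handler in one go.
 */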

/**
 *      irq_set_irq_type - set the irq trigger type for an irq
 *      @irq:   irq number
 *      @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        type &= IRQ_TYPE_SENSE_MASK;
        ret = __irq_set_trigger(desc, irq, type);
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
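
/*
 * Illustrative sketch (not part of this file): platform code that knows a
 * line is, say, falling-edge triggered can program the trigger before the
 * device driver requests it. "virq" is a made-up name; the flag and the
 * call are the real API:
 *
 *      irq_set_irq_type(virq, IRQ_TYPE_EDGE_FALLING);
 *
 * Most drivers instead pass IRQF_TRIGGER_* flags to request_irq(), which
 * reaches __irq_set_trigger() through the same path.
 */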

/**
 *      irq_set_handler_data - set irq handler data for an irq
 *      @irq:   Interrupt number
 *      @data:  Pointer to interrupt specific data
 *
 *      Set the per-irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_data.handler_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *      irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *      @irq_base:      Interrupt number base
 *      @irq_offset:    Interrupt number offset
 *      @entry:         Pointer to MSI descriptor data
 *
 *      Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
                         struct msi_desc *entry)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        desc->irq_data.msi_desc = entry;
        if (entry && !irq_offset)
                entry->irq = irq_base;
        irq_put_desc_unlock(desc, flags);
        return 0;
}

/**
 *      irq_set_msi_desc - set MSI descriptor data for an irq
 *      @irq:   Interrupt number
 *      @entry: Pointer to MSI descriptor data
 *
 *      Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
        return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *      irq_set_chip_data - set irq chip data for an irq
 *      @irq:   Interrupt number
 *      @data:  Pointer to chip specific data
 *
 *      Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_data.chip_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
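
/*
 * Illustrative sketch (not part of this file): chip data is how an
 * irq_chip's callbacks find their per-controller state. The "foo" names
 * below are invented for the example; irq_set_chip_data() and
 * irq_data_get_irq_chip_data() are the real APIs:
 *
 *      irq_set_chip_data(virq, foo);
 *
 *      static void foo_mask(struct irq_data *d)
 *      {
 *              struct foo *foo = irq_data_get_irq_chip_data(d);
 *
 *              writel(BIT(d->hwirq), foo->base + FOO_MASK_SET);
 *      }
 */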

static void irq_state_clr_disabled(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
        int ret = 0;

        irq_state_clr_disabled(desc);
        desc->depth = 0;

        if (desc->irq_data.chip->irq_startup) {
                ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
                irq_state_clr_masked(desc);
        } else {
                irq_enable(desc);
        }
        if (resend)
                check_irq_resend(desc, desc->irq_data.irq);
        return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
        irq_state_set_disabled(desc);
        desc->depth = 1;
        if (desc->irq_data.chip->irq_shutdown)
                desc->irq_data.chip->irq_shutdown(&desc->irq_data);
        else if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
        irq_state_clr_disabled(desc);
        if (desc->irq_data.chip->irq_enable)
                desc->irq_data.chip->irq_enable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:       irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
        irq_state_set_disabled(desc);
        if (desc->irq_data.chip->irq_disable) {
                desc->irq_data.chip->irq_disable(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}
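
/*
 * Illustrative sketch (not part of this file): from a driver's point of
 * view the lazy disable described above is invisible. "virq" is a made-up
 * name; disable_irq_nosync() and enable_irq() are the real APIs:
 *
 *      disable_irq_nosync(virq);       // marks the irq disabled; the
 *                                      // hardware is usually left unmasked
 *      ...
 *      enable_irq(virq);               // an interrupt that fired meanwhile
 *                                      // was masked, marked pending and is
 *                                      // now resent/unmasked
 *
 * disable_irq_nosync() ends up in irq_disable() above via __disable_irq().
 */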

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_enable)
                desc->irq_data.chip->irq_enable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack)
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
        else {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
        irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}

void unmask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
                irq_state_clr_masked(desc);
        }
}

void unmask_threaded_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        if (chip->flags & IRQCHIP_EOI_THREADED)
                chip->irq_eoi(&desc->irq_data);

        if (chip->irq_unmask) {
                chip->irq_unmask(&desc->irq_data);
                irq_state_clr_masked(desc);
        }
}

/*
 *      handle_nested_irq - Handle a nested irq from an irq thread
 *      @irq:   the interrupt number
 *
 *      Handle interrupts which are nested into a threaded interrupt
 *      handler. The handler function is called inside the calling
 *      thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);

        action_ret = action->thread_fn(action->irq, action->dev_id);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
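
/*
 * Illustrative sketch (not part of this file): the classic user of
 * handle_nested_irq() is an irq chip behind a slow bus (e.g. an I2C GPIO
 * expander). Its child interrupts are flagged as nested and then
 * dispatched from the parent's threaded handler. The "foo" names are
 * invented; irq_set_nested_thread() and handle_nested_irq() are real APIs:
 *
 *      irq_set_chip_and_handler(child_virq, &foo_chip, handle_simple_irq);
 *      irq_set_nested_thread(child_virq, true);
 *
 *      static irqreturn_t foo_parent_thread_fn(int irq, void *data)
 *      {
 *              unsigned long pending = foo_read_status_over_i2c(data);
 *              int bit;
 *
 *              for_each_set_bit(bit, &pending, FOO_NR_LINES)
 *                      handle_nested_irq(foo_to_virq(data, bit));
 *              return IRQ_HANDLED;
 *      }
 */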

static bool irq_check_poll(struct irq_desc *desc)
{
        if (!(desc->istate & IRQS_POLL_INPROGRESS))
                return false;
        return irq_wait_for_poll(desc);
}

/**
 *      handle_simple_irq - Simple and software-decoded IRQs.
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Simple interrupts are either sent from a demultiplexing interrupt
 *      handler or come from hardware, where no interrupt hardware control
 *      is necessary.
 *
 *      Note: The caller is expected to handle the ack, clear, mask and
 *      unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        handle_irq_event(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
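
/*
 * Illustrative sketch (not part of this file): a demultiplexing flow
 * handler typically resolves which child lines fired and forwards each of
 * them through generic_handle_irq(), with the children installed using
 * handle_simple_irq as the note above describes. All "foo" names are
 * invented for the example:
 *
 *      static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *      {
 *              struct foo *foo = irq_desc_get_handler_data(desc);
 *              u32 pending = readl(foo->base + FOO_STATUS);
 *
 *              while (pending) {
 *                      int bit = __ffs(pending);
 *
 *                      generic_handle_irq(irq_find_mapping(foo->domain, bit));
 *                      pending &= ~BIT(bit);
 *              }
 *      }
 *
 * Such a handler is installed on the parent line with
 * irq_set_chained_handler(); see __irq_set_handler() further down.
 */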

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
        /*
         * We need to unmask in the following cases:
         * - Standard level irq (IRQF_ONESHOT is not set)
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
                unmask_irq(desc);
}

/**
 *      handle_level_irq - Level type irq handler
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Level type interrupts are active as long as the hardware line has
 *      the active level. This may require masking the interrupt and
 *      unmasking it after the associated handler has acknowledged the
 *      device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it is disabled or no action is available,
         * keep it masked and get out of here
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        handle_irq_event(desc);

        cond_unmask_irq(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
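
/*
 * Illustrative sketch (not part of this file): a level-triggered line is
 * usually wired up by giving the descriptor a chip that can mask, ack and
 * unmask, and selecting handle_level_irq as the flow handler. The "foo"
 * callbacks are invented for the example:
 *
 *      static struct irq_chip foo_level_chip = {
 *              .name           = "foo-level",
 *              .irq_ack        = foo_ack,
 *              .irq_mask       = foo_mask,
 *              .irq_unmask     = foo_unmask,
 *      };
 *
 *      irq_set_chip_and_handler(virq, &foo_level_chip, handle_level_irq);
 *
 * mask_ack_irq() above will use .irq_mask_ack if the chip provides it and
 * fall back to .irq_mask plus .irq_ack otherwise.
 */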

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
        if (desc->preflow_handler)
                desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
        if (!(desc->istate & IRQS_ONESHOT)) {
                chip->irq_eoi(&desc->irq_data);
                return;
        }
        /*
         * We need to unmask in the following cases:
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
                chip->irq_eoi(&desc->irq_data);
                unmask_irq(desc);
        } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
                chip->irq_eoi(&desc->irq_data);
        }
}

/**
 *      handle_fasteoi_irq - irq handler for transparent controllers
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Only a single callback will be issued to the chip: an ->eoi()
 *      call when the interrupt has been serviced. This enables support
 *      for modern forms of interrupt handlers, which handle the flow
 *      details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);

        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
                if (!irq_check_poll(desc))
                        goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it is disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        preflow_handler(desc);
        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}

/**
 *      handle_edge_irq - edge type IRQ handler
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Interrupts occur on the falling and/or rising edge of a hardware
 *      signal. The occurrence is latched into the irq controller hardware
 *      and must be acked in order to be reenabled. After the ack another
 *      interrupt can happen on the same source even before the first one
 *      is handled by the associated event handler. If this happens it
 *      might be necessary to disable (mask) the interrupt depending on the
 *      controller hardware. This requires re-enabling the interrupt inside
 *      the loop which handles the interrupts which have arrived while
 *      the handler was running. If all pending interrupts are handled, the
 *      loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        /*
         * If we're currently running this IRQ, or it is disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out
         */
        if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
                     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
                if (!irq_check_poll(desc)) {
                        desc->istate |= IRQS_PENDING;
                        mask_ack_irq(desc);
                        goto out_unlock;
                }
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        do {
                if (unlikely(!desc->action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in the meantime.
                 */
                if (unlikely(desc->istate & IRQS_PENDING)) {
                        if (!irqd_irq_disabled(&desc->irq_data) &&
                            irqd_irq_masked(&desc->irq_data))
                                unmask_irq(desc);
                }

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
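
/*
 * Illustrative sketch (not part of this file): an edge flow needs a chip
 * that provides at least .irq_ack, since the unconditional
 * chip->irq_ack() call above would otherwise dereference NULL. A made-up
 * "foo_edge_chip" with .irq_ack/.irq_mask/.irq_unmask would be installed
 * much like the level example earlier:
 *
 *      irq_set_chip_and_handler(virq, &foo_edge_chip, handle_edge_irq);
 *      irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
 */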

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *      handle_edge_eoi_irq - edge eoi type IRQ handler
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        /*
         * If we're currently running this IRQ, or it is disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out
         */
        if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
                     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
                if (!irq_check_poll(desc)) {
                        desc->istate |= IRQS_PENDING;
                        goto out_eoi;
                }
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        do {
                if (unlikely(!desc->action))
                        goto out_eoi;

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
        chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
#endif

/**
 *      handle_percpu_irq - Per CPU local irq handler
 *      @irq:   the interrupt number
 *      @desc:  the interrupt description structure for this irq
 *
 *      Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        kstat_incr_irqs_this_cpu(irq, desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        handle_irq_event_percpu(desc, desc->action);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:        the interrupt number
 * @desc:       the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irqaction *action = desc->action;
        void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
        irqreturn_t res;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        trace_irq_handler_entry(irq, action);
        res = action->handler(irq, dev_id);
        trace_irq_handler_exit(irq, action, res);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
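
/*
 * Illustrative sketch (not part of this file): per-CPU device-id
 * interrupts (timers, IPIs and the like) are requested once with a percpu
 * pointer and then enabled on each CPU individually. The "foo" names are
 * invented; the calls are the real APIs:
 *
 *      irq_set_percpu_devid(virq);
 *      request_percpu_irq(virq, foo_timer_handler, "foo_timer",
 *                         &foo_percpu_state);
 *
 *      // later, on each CPU that should receive it:
 *      enable_percpu_irq(virq, IRQ_TYPE_NONE);
 */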

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

        if (!desc)
                return;

        if (!handle) {
                handle = handle_bad_irq;
        } else {
                if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
                        goto out;
        }

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                irq_state_set_disabled(desc);
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
                irq_startup(desc, true);
        }
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
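
/*
 * Illustrative sketch (not part of this file): the is_chained path above
 * is normally reached through irq_set_chained_handler(). A cascaded
 * controller hooks its parent line up with a demux flow handler (such as
 * the foo_demux_handler sketched after handle_simple_irq above) and stores
 * its state as handler data:
 *
 *      irq_set_handler_data(parent_virq, foo);
 *      irq_set_chained_handler(parent_virq, foo_demux_handler);
 *
 * Chained interrupts are marked noprobe/norequest/nothread and started up
 * immediately, which is why no request_irq() call is needed or allowed
 * for parent_virq.
 */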

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        irq_set_chip(irq, chip);
        __irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return;
        irq_settings_clr_and_set(desc, clr, set);

        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
        if (irq_settings_has_no_balance_set(desc))
                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        if (irq_settings_is_per_cpu(desc))
                irqd_set(&desc->irq_data, IRQD_PER_CPU);
        if (irq_settings_can_move_pcntxt(desc))
                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
        if (irq_settings_is_level(desc))
                irqd_set(&desc->irq_data, IRQD_LEVEL);

        irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

        irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
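
/*
 * Illustrative sketch (not part of this file): drivers rarely call
 * irq_modify_status() directly; the usual entry points are the
 * irq_set_status_flags()/irq_clear_status_flags() wrappers. For example,
 * a driver that wants to request an irq without it being enabled
 * automatically could do (with "virq" and "foo" made-up names):
 *
 *      irq_set_status_flags(virq, IRQ_NOAUTOEN);
 *      request_irq(virq, foo_handler, 0, "foo", foo);
 *      ...
 *      enable_irq(virq);
 */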

/**
 *      irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *      Iterate through all irqs and invoke the chip.irq_cpu_online()
 *      for each.
 */
void irq_cpu_online(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_online &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_online(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}

/**
 *      irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *      Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *      for each.
 */
void irq_cpu_offline(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_offline &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_offline(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}