sched: rt-group: deal with PI
kernel/sched_rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}
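
/*
 * The matching read side is pull_rt_task() below: it checks
 * rt_overloaded() (the count) before scanning rd->rto_mask, so a CPU
 * that observes a non-zero count also observes the mask bit set above.
 */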

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                if (!rq->rt.overloaded) {
                        rt_set_overload(rq);
                        rq->rt.overloaded = 1;
                }
        } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;
        }
}
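
/*
 * A rq counts as overloaded only while it has more than one runnable
 * RT task and at least one of them is allowed to migrate; a single RT
 * task, or several tasks all pinned to this CPU, never set the
 * overload state.
 */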
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_FAIR_GROUP_SCHED

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return SCHED_RT_FRAC;

        return rt_rq->tg->rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
                struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

                enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}
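
/*
 * This is the PI hook of this change: a task whose prio differs from
 * its normal_prio has been priority-boosted, and rt_rq_throttled()
 * above refuses to treat a group as throttled while it contains
 * boosted entities, so a PI-boosted task keeps running even when its
 * group has exhausted its ratio.
 */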

#else

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
        return sysctl_sched_rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}
#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio;
#endif

        return rt_task_of(rt_se)->prio;
}

static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
{
        unsigned int rt_ratio = sched_rt_ratio(rt_rq);
        u64 period, ratio;

        if (rt_ratio == SCHED_RT_FRAC)
                return 0;

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
        ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

        if (rt_rq->rt_time > ratio) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                rq->rt_throttled = 1;
                rt_rq->rt_throttled = 1;

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_ratio_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}
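
/*
 * A worked example with assumed values: with sysctl_sched_rt_period =
 * 1000 (ms) the period is 10^9 ns; if rt_ratio is half of
 * SCHED_RT_FRAC, the fixed-point shift yields ratio = 5 * 10^8 ns,
 * i.e. the group is throttled once it has consumed 500ms of RT
 * runtime inside the period.
 */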

static void update_sched_rt_period(struct rq *rq)
{
        struct rt_rq *rt_rq;
        u64 period;

        while (rq->clock > rq->rt_period_expire) {
                period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
                rq->rt_period_expire += period;

                for_each_leaf_rt_rq(rt_rq, rq) {
                        unsigned long rt_ratio = sched_rt_ratio(rt_rq);
                        u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

                        rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
                        if (rt_rq->rt_throttled) {
                                rt_rq->rt_throttled = 0;
                                sched_rt_ratio_enqueue(rt_rq);
                        }
                }

                rq->rt_throttled = 0;
        }
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);

        rt_rq->rt_time += delta_exec;
        /*
         * might make it a tad more accurate:
         *
         * update_sched_rt_period(rq);
         */
        if (sched_rt_ratio_exceeded(rt_rq))
                resched_task(curr);
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio)
                rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory++;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
                        /* recalculate */
                        array = &rt_rq->active;
                        rt_rq->highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rt_rq->highest_prio alone */
        } else
                rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory--;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);

        if (group_rq && rt_rq_throttled(group_rq))
                return;

        list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 *
 * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
 *      doesn't matter much for now, as h=2 for GROUP_SCHED.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
        struct sched_rt_entity *rt_se, *top_se;

        /*
         * dequeue all, top - down.
         */
        do {
                rt_se = &p->rt;
                top_se = NULL;
                for_each_sched_rt_entity(rt_se) {
                        if (on_rt_rq(rt_se))
                                top_se = rt_se;
                }
                if (top_se)
                        dequeue_rt_entity(top_se);
        } while (top_se);
}
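
/*
 * For the h=2 group case the loop runs three times: the first pass
 * walks up from the task entity and dequeues the topmost queued
 * entity (the group), the second dequeues the task itself, and the
 * third finds nothing queued and terminates.
 */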

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (wakeup)
                rt_se->timeout = 0;

        dequeue_rt_stack(p);

        /*
         * enqueue everybody, bottom - up.
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);

        inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        update_curr_rt(rq);

        dequeue_rt_stack(p);

        /*
         * re-enqueue all non-empty rt_rq entities.
         */
        for_each_sched_rt_entity(rt_se) {
                rt_rq = group_rt_rq(rt_se);
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }

        dec_cpu_load(rq, p->se.load.weight);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
        struct rt_prio_array *array = &rt_rq->active;

        list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
        struct rq *rq = task_rq(p);

        /*
         * If the current task is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues, even if the woken
         * RT task is of higher priority than the current RT task.
         * RT tasks behave differently from other tasks: if one gets
         * preempted, we try to push it off to another queue. So
         * keeping a preempting RT task on the same cache-hot CPU
         * would force the running RT task onto a cold CPU, wasting
         * all the cache of the lower-priority RT task in hopes of
         * saving some for an RT task that is just being woken and
         * probably has a cold cache anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;
        }

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         */
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;
        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->rt.nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
 next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio < idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}
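
/*
 * "Second highest" because the highest-priority task is normally
 * rq->curr itself: pick_rt_task() skips whatever is currently
 * running, anything not allowed on @cpu (cpu == -1 accepts any CPU),
 * and anything that cannot migrate at all.
 */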

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
        int       lowest_prio = -1;
        int       lowest_cpu  = -1;
        int       count       = 0;
        int       cpu;

        cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

        /*
         * Scan each rq for the lowest prio.
         */
        for_each_cpu_mask(cpu, *lowest_mask) {
                struct rq *rq = cpu_rq(cpu);

                /* We look for lowest RT prio or non-rt CPU */
                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                        /*
                         * if we already found a low RT queue
                         * and now we found this non-rt queue
                         * clear the mask and set our bit.
                         * Otherwise just return the queue as is
                         * and the count==1 will cause the algorithm
                         * to use the first bit found.
                         */
                        if (lowest_cpu != -1) {
                                cpus_clear(*lowest_mask);
                                cpu_set(rq->cpu, *lowest_mask);
                        }
                        return 1;
                }

                /* no locking for now */
                if ((rq->rt.highest_prio > task->prio)
                    && (rq->rt.highest_prio >= lowest_prio)) {
                        if (rq->rt.highest_prio > lowest_prio) {
                                /* new low - clear old data */
                                lowest_prio = rq->rt.highest_prio;
                                lowest_cpu = cpu;
                                count = 0;
                        }
                        count++;
                } else
                        cpu_clear(cpu, *lowest_mask);
        }

        /*
         * Clear out all the set bits that represent
         * runqueues that were of higher prio than
         * the lowest_prio.
         */
        if (lowest_cpu > 0) {
                /*
                 * Perhaps we could add another cpumask op to
                 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
                 * Then that could be optimized to use memset and such.
                 */
                for_each_cpu_mask(cpu, *lowest_mask) {
                        if (cpu >= lowest_cpu)
                                break;
                        cpu_clear(cpu, *lowest_mask);
                }
        }

        return count;
}
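
/*
 * On return, *lowest_mask holds the candidate CPUs and the return
 * value is how many of them share the lowest priority found (or 1 if
 * a CPU running no RT task at all short-circuited the scan). A sketch
 * with assumed values: if CPU1 runs prio 40 and CPUs 2 and 3 both run
 * prio 60, and the pushed task's own prio is numerically lower (i.e.
 * higher priority) than both, only CPUs 2 and 3 survive in the mask
 * and the count is 2.
 */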

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
        int first;

        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;

        first = first_cpu(*mask);
        if (first != NR_CPUS)
                return first;

        return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
        int count    = find_lowest_cpus(task, lowest_mask);

        if (!count)
                return -1; /* No targets found */

        /*
         * There is no sense in performing an optimal search if only one
         * target is found.
         */
        if (count == 1)
                return first_cpu(*lowest_mask);

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;
                        int       best_cpu;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
                        if (best_cpu != -1)
                                return best_cpu;
                }
        }

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
}
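
/*
 * To summarize the search order: (1) the task's last CPU if it is in
 * the mask (most likely cache-hot), (2) walking the SD_WAKE_AFFINE
 * domains outward from that CPU, preferring this_cpu inside each
 * domain, and (3) failing those, any candidate from the full mask.
 */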

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, the task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {

                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the
 * non-running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in with a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock,
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
out:
        put_task_struct(next_task);

        return ret;
}
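
/*
 * Note that the paranoid counter above bounds the retry loop: if the
 * top pushable task keeps changing while rq->lock is dropped, we give
 * up after RT_MAX_TRIES attempts instead of retrying indefinitely.
 */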

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT */
        while (push_rt_task(rq))
                ;
}

static int pull_rt_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p, *next;
        struct rq *src_rq;

        if (likely(!rt_overloaded(this_rq)))
                return 0;

        next = pick_next_task_rt(this_rq);

        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;

                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1)
                        goto skip;

                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p has a higher priority
                         * than what's currently running on its cpu.
                         * This is just because p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue, or if
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto skip;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         *
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */
                        next = p;

                }
 skip:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUS.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            (p->prio >= rq->rt.highest_prio) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed    = *new_mask;
        p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
}

/*
 * When switching away from the RT queue, we may end up in a position
 * where we want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
                           int running)
{
        /*
         * If there are other RT tasks then we will reschedule
         * and the scheduling of the other RT tasks will handle
         * the balancing. But if we are the last RT task
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!rq->rt.rt_nr_running)
                pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
                           int running)
{
        int check_resched = 1;

        /*
         * If we are already running, then there's nothing
         * that needs to be done. But if we are not running
         * we may need to preempt the current running task.
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
        if (!running) {
#ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
                    rq != task_rq(p))
                        check_resched = 0;
#endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                            int oldprio, int running)
{
        if (running) {
#ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
                 * may need to pull tasks to this runqueue.
                 */
                if (oldprio < p->prio)
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
                 * then reschedule.
                 */
                if (p->prio > rq->rt.highest_prio)
                        resched_task(p);
#else
                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
                        resched_task(p);
#endif /* CONFIG_SMP */
        } else {
                /*
                 * This task is not running, but if it is
                 * greater than the current running task
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
        unsigned long soft, hard;

        if (!p->signal)
                return;

        soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
        hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

        if (soft != RLIM_INFINITY) {
                unsigned long next;

                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
                        p->it_sched_expires = p->se.sum_exec_runtime;
        }
}
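
/*
 * RLIMIT_RTTIME is given in microseconds while rt.timeout counts
 * scheduler ticks, hence the DIV_ROUND_UP by USEC_PER_SEC/HZ above.
 * With assumed values HZ=1000 and a 2000000us soft limit, a task may
 * accumulate 2000 ticks of RT runtime before being flagged.
 */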

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_rt(rq);

        watchdog(rq, p);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .join_domain            = join_domain_rt,
        .leave_domain           = leave_domain_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,

        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
};