sched: Cleanup bandwidth timers
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 261af7b..d8a6196 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -92,22 +92,14 @@
 
 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
-	unsigned long delta;
-	ktime_t soft, hard, now;
-
-	for (;;) {
-		if (hrtimer_active(period_timer))
-			break;
-
-		now = hrtimer_cb_get_time(period_timer);
-		hrtimer_forward(period_timer, now, period);
-
-		soft = hrtimer_get_softexpires(period_timer);
-		hard = hrtimer_get_expires(period_timer);
-		delta = ktime_to_ns(ktime_sub(hard, soft));
-		__hrtimer_start_range_ns(period_timer, soft, delta,
-					 HRTIMER_MODE_ABS_PINNED, 0);
-	}
+	/*
+	 * Do not forward the expiration time of active timers;
+	 * we do not want to loose an overrun.
+	 */
+	if (!hrtimer_active(period_timer))
+		hrtimer_forward_now(period_timer, period);
+
+	hrtimer_start_expires(period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 DEFINE_MUTEX(sched_domains_mutex);
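The point of the rewrite above: hrtimer_forward_now() pushes a timer's expiry forward by whole multiples of the period and returns how many periods elapsed, so forwarding an already-queued timer would silently discard a pending overrun. A minimal sketch of how a periodic callback consumes that overrun count (the demo_* name and the 100 ms period are hypothetical; the hrtimer calls are the real kernel API):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/printk.h>

/* Hypothetical periodic callback: re-arm every 100 ms and detect misses. */
static enum hrtimer_restart demo_period_fn(struct hrtimer *timer)
{
	/*
	 * Returns the number of whole periods since the old expiry;
	 * anything above 1 means we overran (missed) some periods.
	 */
	u64 overruns = hrtimer_forward_now(timer, ms_to_ktime(100));

	if (overruns > 1)
		pr_warn("demo: missed %llu period(s)\n", overruns - 1);

	return HRTIMER_RESTART;	/* keep firing at the forwarded expiry */
}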
@@ -306,6 +298,9 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
+/* cpus with isolated domains */
+cpumask_var_t cpu_isolated_map;
+
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
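This definition moves here, and loses its static qualifier, from the spot removed in a later hunk; the pair of changes makes the isolcpus= boot mask visible outside this file. The consumer side would then look roughly like this (a sketch; the matching extern presumably lives in kernel/sched/sched.h as part of the same series, which this blobdiff does not show):

#include <linux/cpumask.h>

/* sketch: declaration assumed to move into kernel/sched/sched.h */
extern cpumask_var_t cpu_isolated_map;

/* A consumer can now test whether a CPU was isolated at boot: */
static inline bool demo_cpu_is_isolated(int cpu)	/* hypothetical helper */
{
	return cpumask_test_cpu(cpu, cpu_isolated_map);
}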
@@ -352,12 +347,11 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 #ifdef CONFIG_SMP
 
-static int __hrtick_restart(struct rq *rq)
+static void __hrtick_restart(struct rq *rq)
 {
 	struct hrtimer *timer = &rq->hrtick_timer;
-	ktime_t time = hrtimer_get_softexpires(timer);
 
-	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
+	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
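hrtimer_start_expires() bundles exactly the bookkeeping the old body did by hand: it re-arms the timer at its already-programmed soft and hard expiry, which is also why the return type can become void. Roughly paraphrased from the include/linux/hrtimer.h helper of this era (a sketch, not a verbatim copy):

static inline void hrtimer_start_expires(struct hrtimer *timer,
					 enum hrtimer_mode mode)
{
	unsigned long delta;
	ktime_t soft, hard;

	soft = hrtimer_get_softexpires(timer);	/* earliest allowed expiry */
	hard = hrtimer_get_expires(timer);	/* latest allowed expiry */
	delta = ktime_to_ns(ktime_sub(hard, soft));
	hrtimer_start_range_ns(timer, soft, delta, mode);
}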
@@ -437,8 +431,8 @@ void hrtick_start(struct rq *rq, u64 delay)
 	 * doesn't make sense. Rely on vruntime for fairness.
 	 */
 	delay = max_t(u64, delay, 10000LL);
-	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-				 HRTIMER_MODE_REL_PINNED, 0);
+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+		      HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)
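__hrtimer_start_range_ns() was hrtimer-internal; hrtimer_start() is the public entry point and, with zero slack, behaves the same here. A self-contained sketch of the arm-a-relative-pinned-timer pattern (all demo_* names hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;	/* hypothetical timer instance */

static enum hrtimer_restart demo_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;	/* one-shot: do not re-arm */
}

static void demo_arm(u64 delay_ns)
{
	/* REL_PINNED: expiry is relative to now, timer stays on this CPU. */
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	demo_timer.function = demo_fn;
	hrtimer_start(&demo_timer, ns_to_ktime(delay_ns),
		      HRTIMER_MODE_REL_PINNED);
}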
@@ -2850,7 +2844,7 @@ asmlinkage __visible void __sched schedule_user(void)
 	 * we find a better solution.
 	 *
 	 * NB: There are buggy callers of this function. Ideally we
-	 * should warn if prev_state != IN_USER, but that will trigger
+	 * should warn if prev_state != CONTEXT_USER, but that will trigger
 	 * too frequently to make sense yet.
 	 */
 	enum ctx_state prev_state = exception_enter();
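This hunk only catches the comment up with a rename elsewhere: the context-tracking states IN_KERNEL/IN_USER became CONTEXT_KERNEL/CONTEXT_USER. For reference, the enum the comment refers to looked roughly like this after the rename (paraphrased from include/linux/context_tracking_state.h of this era):

enum ctx_state {
	CONTEXT_KERNEL = 0,
	CONTEXT_USER,
};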
@@ -5811,9 +5805,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	update_top_cache_domain(cpu);
 }
 
-/* cpus with isolated domains */
-static cpumask_var_t cpu_isolated_map;
-
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
@@ -8125,10 +8116,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 	__refill_cfs_bandwidth_runtime(cfs_b);
 
 	/* restart the period timer (if active) to handle new period expiry */
-	if (runtime_enabled && cfs_b->timer_active) {
-		/* force a reprogram */
-		__start_cfs_bandwidth(cfs_b, true);
-	}
+	if (runtime_enabled)
+		start_cfs_bandwidth(cfs_b);
 	raw_spin_unlock_irq(&cfs_b->lock);
 
 	for_each_online_cpu(i) {
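The force flag and the cfs_b->timer_active bookkeeping disappear because the replacement helper can lean on the semantics from the first hunk: only inactive timers get forwarded, so calling it on a live timer is harmless. start_cfs_bandwidth() itself lands in kernel/sched/fair.c in the same series and is not visible in this blobdiff, but its shape is roughly this (a sketch; the period_active field is introduced by that series):

void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	lockdep_assert_held(&cfs_b->lock);

	if (cfs_b->period_active)
		return;		/* period timer already queued */

	cfs_b->period_active = 1;
	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
}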