int ret = 0;
/* Some related CPUs might not be present (physically hotplugged) */
- for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ for_each_cpu(j, policy->real_cpus) {
if (j == policy->kobj_cpu)
continue;
unsigned int j;
/* Some related CPUs might not be present (physically hotplugged) */
- for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+ for_each_cpu(j, policy->real_cpus) {
if (j == policy->kobj_cpu)
continue;
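The switch from for_each_cpu_and() to for_each_cpu() works because the presence check moves from iteration time to policy-creation time. A minimal sketch of the difference, assuming both masks are initialized as elsewhere in this patch (kernel context implied):

        /* Sketch only. Old style: filter against cpu_present_mask on every walk. */
        for_each_cpu_and(j, policy->related_cpus, cpu_present_mask)
                pr_debug("present related CPU: %u\n", j);

        /* New style: policy->real_cpus already contains only the CPUs that were
         * present when the policy was created, so no extra mask is needed. */
        for_each_cpu(j, policy->real_cpus)
                pr_debug("CPU with a sysfs presence: %u\n", j);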
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
goto err_free_cpumask;
+ if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+ goto err_free_rcpumask;
+
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
"cpufreq");
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
- goto err_free_rcpumask;
+ goto err_free_real_cpus;
}
INIT_LIST_HEAD(&policy->policy_list);
return policy;
+err_free_real_cpus:
+ free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpufreq_policy_put_kobj(policy, notify);
+ free_cpumask_var(policy->real_cpus);
free_cpumask_var(policy->related_cpus);
free_cpumask_var(policy->cpus);
kfree(policy);
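The new real_cpus mask slots into the usual reverse-order unwind in the policy allocator. A hedged sketch of that pattern (helper name hypothetical, unrelated fields and locking elided, kernel headers assumed):

        static struct cpufreq_policy *alloc_policy_sketch(void)
        {
                struct cpufreq_policy *policy = kzalloc(sizeof(*policy), GFP_KERNEL);

                if (!policy)
                        return NULL;
                if (!zalloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                        goto err_free_policy;
                if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                        goto err_free_cpumask;
                if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                        goto err_free_rcpumask;

                return policy;

                /* Each label releases exactly what was allocated before the
                 * failing step, in reverse order, so no goto leaks anything. */
        err_free_rcpumask:
                free_cpumask_var(policy->related_cpus);
        err_free_cpumask:
                free_cpumask_var(policy->cpus);
        err_free_policy:
                kfree(policy);
                return NULL;
        }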
pr_debug("adding CPU %u\n", cpu);
- /*
- * Only possible if 'cpu' wasn't physically present earlier and we are
- * here from subsys_interface add callback. A hotplug notifier will
- * follow and we will handle it like logical CPU hotplug then. For now,
- * just create the sysfs link.
- */
- if (cpu_is_offline(cpu))
- return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
+ if (cpu_is_offline(cpu)) {
+ /*
+ * Only possible if we are here from the subsys_interface add
+ * callback. A hotplug notifier will follow and we will handle
+ * it as CPU online then. For now, just create the sysfs link,
+ * unless there is no policy or the link is already present.
+ */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+ ? add_cpu_dev_symlink(policy, cpu) : 0;
+ }
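cpumask_test_and_set_cpu() makes the link creation idempotent: it atomically sets the CPU's bit in real_cpus and returns the bit's previous value. A hedged sketch of the idiom in isolation:

        /* Sketch: only the first add for 'cpu' creates the symlink. */
        if (!cpumask_test_and_set_cpu(cpu, policy->real_cpus)) {
                /* Bit was clear: first sighting of this CPU, create the link. */
                ret = add_cpu_dev_symlink(policy, cpu);
        } else {
                /* Bit already set: the link exists, nothing to do. */
                ret = 0;
        }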
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
/* related cpus should at least have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+ /* Remember which CPUs have been present at the policy creation time. */
+ if (!recover_policy)
+ cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
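For example, a policy whose cpus mask spans CPUs 0-3 on a board where CPU 3 sits in a not-yet-populated hotplug socket starts with real_cpus = {0,1,2}; CPU 3's bit is only set once it is physically added. A simplified sketch of the lifecycle (variable names hypothetical):

        /* Sketch: snapshot the present CPUs at policy creation... */
        cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
        /* ...then let physical hotplug edit the mask directly. */
        cpumask_set_cpu(newly_added_cpu, policy->real_cpus);   /* on add */
        cpumask_clear_cpu(removed_cpu, policy->real_cpus);     /* on remove */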
/*
* affected cpus must always be the ones that are online. We aren't
* managing offline cpus here.
return ret;
}
-static int __cpufreq_remove_dev_prepare(struct device *dev,
- struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
{
unsigned int cpu = dev->id;
int ret = 0;
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret) {
+ if (ret)
pr_err("%s: Failed to stop governor\n", __func__);
- return ret;
- }
}
down_write(&policy->rwsem);
return ret;
}
-static int __cpufreq_remove_dev_finish(struct device *dev,
- struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
{
unsigned int cpu = dev->id;
int ret;
/* If cpu is last user of policy, free policy */
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- if (ret) {
+ if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
- return ret;
- }
}
/*
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
- /* Free the policy only if the driver is getting removed. */
- if (sif)
- cpufreq_policy_free(policy, true);
-
return 0;
}
*
* Removes the cpufreq interface for a CPU device.
*/
- static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
- int ret;
-
- /*
- * Only possible if 'cpu' is getting physically removed now. A hotplug
- * notifier should have already been called and we just need to remove
- * link or free policy here.
- */
- if (cpu_is_offline(cpu)) {
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
- struct cpumask mask;
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
- if (!policy)
- return 0;
+ if (!policy)
+ return;
- cpumask_copy(&mask, policy->related_cpus);
- cpumask_clear_cpu(cpu, &mask);
+ if (cpu_online(cpu)) {
+ __cpufreq_remove_dev_prepare(dev);
+ __cpufreq_remove_dev_finish(dev);
+ }
- /*
- * Free policy only if all policy->related_cpus are removed
- * physically.
- */
- if (cpumask_intersects(&mask, cpu_present_mask)) {
- remove_cpu_dev_symlink(policy, cpu);
- return 0;
- }
+ cpumask_clear_cpu(cpu, policy->real_cpus);
+ if (cpumask_empty(policy->real_cpus)) {
cpufreq_policy_free(policy, true);
- return 0;
+ return;
}
- ret = __cpufreq_remove_dev_prepare(dev, sif);
+ if (cpu != policy->kobj_cpu) {
+ remove_cpu_dev_symlink(policy, cpu);
+ } else {
+ /*
+ * The CPU owning the policy object is going away. Move it to
+ * another suitable CPU.
+ */
+ unsigned int new_cpu = cpumask_first(policy->real_cpus);
+ struct device *new_dev = get_cpu_device(new_cpu);
- if (!ret)
- __cpufreq_remove_dev_finish(dev, sif);
+ dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
+
+ sysfs_remove_link(&new_dev->kobj, "cpufreq");
+ policy->kobj_cpu = new_cpu;
+ WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+ }
-
- return 0;
}
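The ordering in the re-homing branch matters: new_cpu already has a "cpufreq" symlink pointing at the policy kobject, and kobject_move() would collide with that name when it re-creates the directory entry under the new parent. A hedged sketch of the sequence with the steps annotated:

        /* Sketch: re-home the policy kobject under the surviving CPU. */
        sysfs_remove_link(&new_dev->kobj, "cpufreq");   /* drop the stale symlink first */
        policy->kobj_cpu = new_cpu;                     /* record the new owner */
        WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));   /* move the sysfs dir */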
static void handle_update(struct work_struct *work)
break;
case CPU_DOWN_PREPARE:
- __cpufreq_remove_dev_prepare(dev, NULL);
+ __cpufreq_remove_dev_prepare(dev);
break;
case CPU_POST_DEAD:
- __cpufreq_remove_dev_finish(dev, NULL);
+ __cpufreq_remove_dev_finish(dev);
break;
case CPU_DOWN_FAILED:
void cpu_hotplug_disable(void)
{
cpu_maps_update_begin();
- cpu_hotplug_disabled = 1;
+ cpu_hotplug_disabled++;
cpu_maps_update_done();
}
+EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
void cpu_hotplug_enable(void)
{
cpu_maps_update_begin();
- cpu_hotplug_disabled = 0;
+ WARN_ON(--cpu_hotplug_disabled < 0);
cpu_maps_update_done();
}
-
+EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif /* CONFIG_HOTPLUG_CPU */
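Turning cpu_hotplug_disabled into a counter lets disable/enable sections nest; hotplug only resumes once the count drops back to zero. A hedged usage sketch:

        /* Sketch: nested disable/enable pairs balance the counter. */
        cpu_hotplug_disable();  /* 0 -> 1: hotplug blocked        */
        cpu_hotplug_disable();  /* 1 -> 2: still blocked          */
        cpu_hotplug_enable();   /* 2 -> 1: still blocked          */
        cpu_hotplug_enable();   /* 1 -> 0: hotplug allowed again  */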
/* Need to know about CPUs going up/down? */
- int __ref register_cpu_notifier(struct notifier_block *nb)
+ int register_cpu_notifier(struct notifier_block *nb)
{
int ret;
cpu_maps_update_begin();
return ret;
}
- int __ref __register_cpu_notifier(struct notifier_block *nb)
+ int __register_cpu_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&cpu_chain, nb);
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
- void __ref unregister_cpu_notifier(struct notifier_block *nb)
+ void unregister_cpu_notifier(struct notifier_block *nb)
{
cpu_maps_update_begin();
raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
- void __ref __unregister_cpu_notifier(struct notifier_block *nb)
+ void __unregister_cpu_notifier(struct notifier_block *nb)
{
raw_notifier_chain_unregister(&cpu_chain, nb);
}
};
/* Take this CPU down. */
- static int __ref take_cpu_down(void *_param)
+ static int take_cpu_down(void *_param)
{
struct take_cpu_down_param *param = _param;
int err;
}
/* Requires cpu_add_remove_lock to be held */
- static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
int err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
return err;
}
- int __ref cpu_down(unsigned int cpu)
+ int cpu_down(unsigned int cpu)
{
int err;
}
}
- if (!error) {
+ if (!error)
BUG_ON(num_online_cpus() > 1);
- /* Make sure the CPUs won't be enabled by someone else */
- cpu_hotplug_disabled = 1;
- } else {
+ else
pr_err("Non-boot CPUs are not disabled\n");
- }
+
+ /*
+ * Make sure the CPUs won't be enabled by someone else. We need to do
+ * this even in case of failure as all disable_nonboot_cpus() users are
+ * supposed to do enable_nonboot_cpus() on the failure path.
+ */
+ cpu_hotplug_disabled++;
+
cpu_maps_update_done();
return error;
}
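Because the counter is now bumped even when disabling fails, the calling convention described in the comment applies to every user. A hedged sketch of a hypothetical caller honoring it:

        /* Sketch: balance cpu_hotplug_disabled on both success and failure. */
        int error = disable_nonboot_cpus();
        if (error) {
                enable_nonboot_cpus();  /* decrements the counter again */
                return error;
        }
        /* ... run with only the boot CPU online ... */
        enable_nonboot_cpus();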
{
}
- void __ref enable_nonboot_cpus(void)
+ void enable_nonboot_cpus(void)
{
int cpu, error;
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
- cpu_hotplug_disabled = 0;
+ WARN_ON(--cpu_hotplug_disabled < 0);
if (cpumask_empty(frozen_cpus))
goto out;