Merge branch 'smp/for-block' into smp/hotplug
author    Thomas Gleixner <tglx@linutronix.de>
Wed, 21 Sep 2016 07:39:00 +0000 (09:39 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Wed, 21 Sep 2016 07:39:00 +0000 (09:39 +0200)
Bring in the block hotplug states for consistency.
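
Every conversion below follows the same shape: the multiplexed notifier
callback (a switch on CPU_ONLINE/CPU_DEAD, with the *_FROZEN variants now
handled implicitly by the hotplug core) becomes one plain int-returning
function per state. A minimal sketch of that shape — the foo_* names and
the "subsys/foo:online" string are hypothetical, the cpuhp calls are the
ones used throughout the hunks:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int foo_cpu_online(unsigned int cpu)
{
	/* bring up per-cpu state; a nonzero return rolls back the bringup */
	return 0;
}

static int foo_cpu_dead(unsigned int cpu)
{
	/* tear down after the CPU is gone */
	return 0;
}

static enum cpuhp_state foo_hp_online;

static int __init foo_init(void)
{
	int ret;

	/*
	 * The _nocalls variant registers without invoking the callback
	 * for CPUs that are already online. CPUHP_AP_ONLINE_DYN allocates
	 * a dynamic state and returns it; keep it around for a later
	 * cpuhp_remove_state_nocalls(foo_hp_online).
	 */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"subsys/foo:online",
					foo_cpu_online, foo_cpu_dead);
	if (ret < 0)
		return ret;
	foo_hp_online = ret;
	return 0;
}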

48 files changed:
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-shmobile/platsmp-scu.c
arch/arm64/kernel/fpsimd.c
arch/ia64/kernel/mca.c
arch/mips/cavium-octeon/smp.c
arch/mips/loongson64/loongson-3/smp.c
arch/powerpc/mm/mmu_context_nohash.c
arch/powerpc/platforms/powermac/smp.c
arch/s390/mm/fault.c
arch/sh/kernel/cpu/sh4a/smp-shx3.c
arch/sparc/kernel/smp_32.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/kvm.c
block/blk-softirq.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_throttling.c
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/bus/mips_cdmm.c
drivers/cpufreq/cpufreq.c
drivers/cpuidle/coupled.c
drivers/cpuidle/cpuidle-powernv.c
drivers/cpuidle/cpuidle-pseries.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/net/ethernet/marvell/mvneta.c
drivers/net/virtio_net.c
drivers/oprofile/timer_int.c
drivers/perf/arm_pmu.c
drivers/scsi/virtio_scsi.c
include/acpi/processor.h
include/linux/cpu.h
include/linux/cpuhotplug.h
include/linux/padata.h
include/linux/perf/arm_pmu.h
include/linux/relay.h
include/linux/slab.h
kernel/cpu.c
kernel/padata.c
kernel/relay.c
kernel/softirq.c
lib/cpu-notifier-error-inject.c
lib/irq_poll.c
mm/page-writeback.c
mm/slab.c
mm/slub.c
tools/testing/radix-tree/linux/cpu.h
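
The arm-cci, arm-ccn and md/raid5 conversions below additionally use the
multi-instance form: one cpuhp state with a per-device hlist_node, which
replaces each driver's private mutex plus global device list. A hedged
sketch of that form (the foo_* names and the CPUHP_AP_FOO_ONLINE state id
are placeholders; a real driver adds its id to enum cpuhp_state):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct foo_device {
	struct hlist_node node;	/* replaces notifier_block + list_head */
	/* per-device state ... */
};

static int foo_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct foo_device *fdev = hlist_entry_safe(node, struct foo_device,
						   node);

	/*
	 * Called once per registered instance; the cpuhp core walks the
	 * instance list, so no driver-side locking is needed.
	 */
	if (!fdev)
		return 0;
	/* migrate fdev's work off @cpu here */
	return 0;
}

static int __init foo_driver_init(void)
{
	return cpuhp_setup_state_multi(CPUHP_AP_FOO_ONLINE, /* placeholder */
				       "foo/pmu:online",
				       NULL, foo_offline_cpu);
}

static int foo_probe(struct foo_device *fdev)
{
	/* per-device registration; callbacks now fire for this instance */
	return cpuhp_state_add_instance_nocalls(CPUHP_AP_FOO_ONLINE,
						&fdev->node);
}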

index 0c47543..369f95a 100644 (file)
@@ -322,34 +322,25 @@ static void irq_save_secure_context(void)
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int irq_cpu_hotplug_notify(struct notifier_block *self,
-                                 unsigned long action, void *hcpu)
+static int omap_wakeupgen_cpu_online(unsigned int cpu)
 {
-       unsigned int cpu = (unsigned int)hcpu;
-
-       /*
-        * Corresponding FROZEN transitions do not have to be handled,
-        * they are handled by at a higher level
-        * (drivers/cpuidle/coupled.c).
-        */
-       switch (action) {
-       case CPU_ONLINE:
-               wakeupgen_irqmask_all(cpu, 0);
-               break;
-       case CPU_DEAD:
-               wakeupgen_irqmask_all(cpu, 1);
-               break;
-       }
-       return NOTIFY_OK;
+       wakeupgen_irqmask_all(cpu, 0);
+       return 0;
 }
 
-static struct notifier_block irq_hotplug_notifier = {
-       .notifier_call = irq_cpu_hotplug_notify,
-};
+static int omap_wakeupgen_cpu_dead(unsigned int cpu)
+{
+       wakeupgen_irqmask_all(cpu, 1);
+       return 0;
+}
 
 static void __init irq_hotplug_init(void)
 {
-       register_hotcpu_notifier(&irq_hotplug_notifier);
+       cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
+                                 omap_wakeupgen_cpu_online, NULL);
+       cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
+                                 "arm/omap-wake:dead", NULL,
+                                 omap_wakeupgen_cpu_dead);
 }
 #else
 static void __init irq_hotplug_init(void)
index 8d478f1..d1ecaf3 100644 (file)
 static phys_addr_t shmobile_scu_base_phys;
 static void __iomem *shmobile_scu_base;
 
-static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
-                                         unsigned long action, void *hcpu)
+static int shmobile_scu_cpu_prepare(unsigned int cpu)
 {
-       unsigned int cpu = (long)hcpu;
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-               /* For this particular CPU register SCU SMP boot vector */
-               shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
-                                 shmobile_scu_base_phys);
-               break;
-       };
-
-       return NOTIFY_OK;
+       /* For this particular CPU register SCU SMP boot vector */
+       shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
+                         shmobile_scu_base_phys);
+       return 0;
 }
 
-static struct notifier_block shmobile_smp_scu_notifier = {
-       .notifier_call = shmobile_smp_scu_notifier_call,
-};
-
 void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
                                          unsigned int max_cpus)
 {
@@ -54,7 +42,9 @@ void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
        scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
 
        /* Use CPU notifier for reset vector control */
-       register_cpu_notifier(&shmobile_smp_scu_notifier);
+       cpuhp_setup_state_nocalls(CPUHP_ARM_SHMOBILE_SCU_PREPARE,
+                                 "arm/shmobile-scu:prepare",
+                                 shmobile_scu_cpu_prepare, NULL);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
index 975b274..394c61d 100644 (file)
@@ -299,28 +299,16 @@ static inline void fpsimd_pm_init(void) { }
 #endif /* CONFIG_CPU_PM */
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
-                                      unsigned long action,
-                                      void *hcpu)
+static int fpsimd_cpu_dead(unsigned int cpu)
 {
-       unsigned int cpu = (long)hcpu;
-
-       switch (action) {
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               per_cpu(fpsimd_last_state, cpu) = NULL;
-               break;
-       }
-       return NOTIFY_OK;
+       per_cpu(fpsimd_last_state, cpu) = NULL;
+       return 0;
 }
 
-static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
-       .notifier_call = fpsimd_cpu_hotplug_notifier,
-};
-
 static inline void fpsimd_hotplug_init(void)
 {
-       register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
+       cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
+                                 NULL, fpsimd_cpu_dead);
 }
 
 #else
index eb9220c..c285886 100644 (file)
@@ -1890,7 +1890,7 @@ ia64_mca_cpu_init(void *cpu_data)
                                                              PAGE_KERNEL)));
 }
 
-static void ia64_mca_cmc_vector_adjust(void *dummy)
+static int ia64_mca_cpu_online(unsigned int cpu)
 {
        unsigned long flags;
 
@@ -1898,25 +1898,9 @@ static void ia64_mca_cmc_vector_adjust(void *dummy)
        if (!cmc_polling_enabled)
                ia64_mca_cmc_vector_enable(NULL);
        local_irq_restore(flags);
+       return 0;
 }
 
-static int mca_cpu_callback(struct notifier_block *nfb,
-                                     unsigned long action,
-                                     void *hcpu)
-{
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               ia64_mca_cmc_vector_adjust(NULL);
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block mca_cpu_notifier = {
-       .notifier_call = mca_cpu_callback
-};
-
 /*
  * ia64_mca_init
  *
@@ -2111,15 +2095,13 @@ ia64_mca_late_init(void)
        if (!mca_init)
                return 0;
 
-       register_hotcpu_notifier(&mca_cpu_notifier);
-
        /* Setup the CMCI/P vector and handler */
        setup_timer(&cmc_poll_timer, ia64_mca_cmc_poll, 0UL);
 
        /* Unmask/enable the vector */
        cmc_polling_enabled = 0;
-       schedule_work(&cmc_enable_work);
-
+       cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/mca:online",
+                         ia64_mca_cpu_online, NULL);
        IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
 
 #ifdef CONFIG_ACPI
index 4d457d6..256fe6f 100644 (file)
@@ -380,29 +380,11 @@ static int octeon_update_boot_vector(unsigned int cpu)
        return 0;
 }
 
-static int octeon_cpu_callback(struct notifier_block *nfb,
-       unsigned long action, void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               octeon_update_boot_vector(cpu);
-               break;
-       case CPU_ONLINE:
-               pr_info("Cpu %d online\n", cpu);
-               break;
-       case CPU_DEAD:
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
 static int register_cavium_notifier(void)
 {
-       hotcpu_notifier(octeon_cpu_callback, 0);
-       return 0;
+       return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
+                                        "mips/cavium:prepare",
+                                        octeon_update_boot_vector, NULL);
 }
 late_initcall(register_cavium_notifier);
 
index 2fec6f7..99aab9f 100644 (file)
@@ -677,7 +677,7 @@ void play_dead(void)
        play_dead_at_ckseg1(state_addr);
 }
 
-void loongson3_disable_clock(int cpu)
+static int loongson3_disable_clock(unsigned int cpu)
 {
        uint64_t core_id = cpu_data[cpu].core;
        uint64_t package_id = cpu_data[cpu].package;
@@ -688,9 +688,10 @@ void loongson3_disable_clock(int cpu)
                if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
                        LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
        }
+       return 0;
 }
 
-void loongson3_enable_clock(int cpu)
+static int loongson3_enable_clock(unsigned int cpu)
 {
        uint64_t core_id = cpu_data[cpu].core;
        uint64_t package_id = cpu_data[cpu].package;
@@ -701,34 +702,15 @@ void loongson3_enable_clock(int cpu)
                if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
                        LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
        }
-}
-
-#define CPU_POST_DEAD_FROZEN   (CPU_POST_DEAD | CPU_TASKS_FROZEN)
-static int loongson3_cpu_callback(struct notifier_block *nfb,
-       unsigned long action, void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-
-       switch (action) {
-       case CPU_POST_DEAD:
-       case CPU_POST_DEAD_FROZEN:
-               pr_info("Disable clock for CPU#%d\n", cpu);
-               loongson3_disable_clock(cpu);
-               break;
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               pr_info("Enable clock for CPU#%d\n", cpu);
-               loongson3_enable_clock(cpu);
-               break;
-       }
-
-       return NOTIFY_OK;
+       return 0;
 }
 
 static int register_loongson3_notifier(void)
 {
-       hotcpu_notifier(loongson3_cpu_callback, 0);
-       return 0;
+       return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
+                                        "mips/loongson:prepare",
+                                        loongson3_enable_clock,
+                                        loongson3_disable_clock);
 }
 early_initcall(register_loongson3_notifier);
 
index 7d95bc4..c491f2c 100644 (file)
@@ -369,44 +369,34 @@ void destroy_context(struct mm_struct *mm)
 }
 
 #ifdef CONFIG_SMP
-
-static int mmu_context_cpu_notify(struct notifier_block *self,
-                                 unsigned long action, void *hcpu)
+static int mmu_ctx_cpu_prepare(unsigned int cpu)
 {
-       unsigned int cpu = (unsigned int)(long)hcpu;
-
 /* We don't touch CPU 0 map, it's allocated at boot and kept
         * around forever
         */
        if (cpu == boot_cpuid)
-               return NOTIFY_OK;
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
-               stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
-               break;
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
-               kfree(stale_map[cpu]);
-               stale_map[cpu] = NULL;
-
-               /* We also clear the cpu_vm_mask bits of CPUs going away */
-               clear_tasks_mm_cpumask(cpu);
-       break;
-#endif /* CONFIG_HOTPLUG_CPU */
-       }
-       return NOTIFY_OK;
+               return 0;
+
+       pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
+       stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
+       return 0;
 }
 
-static struct notifier_block mmu_context_cpu_nb = {
-       .notifier_call  = mmu_context_cpu_notify,
-};
+static int mmu_ctx_cpu_dead(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       if (cpu == boot_cpuid)
+               return 0;
+
+       pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
+       kfree(stale_map[cpu]);
+       stale_map[cpu] = NULL;
+
+       /* We also clear the cpu_vm_mask bits of CPUs going away */
+       clear_tasks_mm_cpumask(cpu);
+#endif
+       return 0;
+}
 
 #endif /* CONFIG_SMP */
 
@@ -469,7 +459,9 @@ void __init mmu_context_init(void)
 #else
        stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
 
-       register_cpu_notifier(&mmu_context_cpu_nb);
+       cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
+                                 "powerpc/mmu/ctx:prepare",
+                                 mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
 #endif
 
        printk(KERN_INFO
index 834868b..366e4f5 100644 (file)
@@ -852,37 +852,33 @@ static void smp_core99_setup_cpu(int cpu_nr)
 
 #ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
-static int smp_core99_cpu_notify(struct notifier_block *self,
-                                unsigned long action, void *hcpu)
+static unsigned int smp_core99_host_open;
+
+static int smp_core99_cpu_prepare(unsigned int cpu)
 {
        int rc;
 
-       switch(action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               /* Open i2c bus if it was used for tb sync */
-               if (pmac_tb_clock_chip_host) {
-                       rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
-                       if (rc) {
-                               pr_err("Failed to open i2c bus for time sync\n");
-                               return notifier_from_errno(rc);
-                       }
+       /* Open i2c bus if it was used for tb sync */
+       if (pmac_tb_clock_chip_host && !smp_core99_host_open) {
+               rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+               if (rc) {
+                       pr_err("Failed to open i2c bus for time sync\n");
+                       return notifier_from_errno(rc);
                }
-               break;
-       case CPU_ONLINE:
-       case CPU_UP_CANCELED:
-               /* Close i2c bus if it was used for tb sync */
-               if (pmac_tb_clock_chip_host)
-                       pmac_i2c_close(pmac_tb_clock_chip_host);
-               break;
-       default:
-               break;
+               smp_core99_host_open = 1;
        }
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block smp_core99_cpu_nb = {
-       .notifier_call  = smp_core99_cpu_notify,
-};
+static int smp_core99_cpu_online(unsigned int cpu)
+{
+       /* Close i2c bus if it was used for tb sync */
+       if (pmac_tb_clock_chip_host && smp_core99_host_open) {
+               pmac_i2c_close(pmac_tb_clock_chip_host);
+               smp_core99_host_open = 0;
+       }
+       return 0;
+}
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static void __init smp_core99_bringup_done(void)
@@ -902,7 +898,11 @@ static void __init smp_core99_bringup_done(void)
                g5_phy_disable_cpu1();
        }
 #ifdef CONFIG_HOTPLUG_CPU
-       register_cpu_notifier(&smp_core99_cpu_nb);
+       cpuhp_setup_state_nocalls(CPUHP_POWERPC_PMAC_PREPARE,
+                                 "powerpc/pmac:prepare", smp_core99_cpu_prepare,
+                                 NULL);
+       cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/pmac:online",
+                                 smp_core99_cpu_online, NULL);
 #endif
 
        if (ppc_md.progress)
index a58bca6..cbb73fa 100644 (file)
@@ -740,28 +740,21 @@ out:
        put_task_struct(tsk);
 }
 
-static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
-                            void *hcpu)
+static int pfault_cpu_dead(unsigned int cpu)
 {
        struct thread_struct *thread, *next;
        struct task_struct *tsk;
 
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_DEAD:
-               spin_lock_irq(&pfault_lock);
-               list_for_each_entry_safe(thread, next, &pfault_list, list) {
-                       thread->pfault_wait = 0;
-                       list_del(&thread->list);
-                       tsk = container_of(thread, struct task_struct, thread);
-                       wake_up_process(tsk);
-                       put_task_struct(tsk);
-               }
-               spin_unlock_irq(&pfault_lock);
-               break;
-       default:
-               break;
+       spin_lock_irq(&pfault_lock);
+       list_for_each_entry_safe(thread, next, &pfault_list, list) {
+               thread->pfault_wait = 0;
+               list_del(&thread->list);
+               tsk = container_of(thread, struct task_struct, thread);
+               wake_up_process(tsk);
+               put_task_struct(tsk);
        }
-       return NOTIFY_OK;
+       spin_unlock_irq(&pfault_lock);
+       return 0;
 }
 
 static int __init pfault_irq_init(void)
@@ -775,7 +768,8 @@ static int __init pfault_irq_init(void)
        if (rc)
                goto out_pfault;
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
-       hotcpu_notifier(pfault_cpu_notify, 0);
+       cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
+                                 NULL, pfault_cpu_dead);
        return 0;
 
 out_pfault:
index 839612c..0d3637c 100644 (file)
@@ -122,32 +122,16 @@ static void shx3_update_boot_vector(unsigned int cpu)
        __raw_writel(STBCR_RESET, STBCR_REG(cpu));
 }
 
-static int
-shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int shx3_cpu_prepare(unsigned int cpu)
 {
-       unsigned int cpu = (unsigned int)hcpu;
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-               shx3_update_boot_vector(cpu);
-               break;
-       case CPU_ONLINE:
-               pr_info("CPU %u is now online\n", cpu);
-               break;
-       case CPU_DEAD:
-               break;
-       }
-
-       return NOTIFY_OK;
+       shx3_update_boot_vector(cpu);
+       return 0;
 }
 
-static struct notifier_block shx3_cpu_notifier = {
-       .notifier_call          = shx3_cpu_callback,
-};
-
 static int register_shx3_cpu_notifier(void)
 {
-       register_hotcpu_notifier(&shx3_cpu_notifier);
+       cpuhp_setup_state_nocalls(CPUHP_SH_SH3X_PREPARE, "sh/shx3:prepare",
+                                 shx3_cpu_prepare, NULL);
        return 0;
 }
 late_initcall(register_shx3_cpu_notifier);
index fb30e7c..e80e6ba 100644 (file)
@@ -352,9 +352,7 @@ static void sparc_start_secondary(void *arg)
        preempt_disable();
        cpu = smp_processor_id();
 
-       /* Invoke the CPU_STARTING notifier callbacks */
        notify_cpu_starting(cpu);
-
        arch_cpu_pre_online(arg);
 
        /* Set the CPU in the cpu_online_mask */
index cb0673c..391b7f8 100644 (file)
@@ -927,7 +927,7 @@ static void uv_heartbeat(unsigned long ignored)
        mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
-static void uv_heartbeat_enable(int cpu)
+static int uv_heartbeat_enable(unsigned int cpu)
 {
        while (!uv_cpu_scir_info(cpu)->enabled) {
                struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
@@ -941,43 +941,24 @@ static void uv_heartbeat_enable(int cpu)
                /* also ensure that boot cpu is enabled */
                cpu = 0;
        }
+       return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void uv_heartbeat_disable(int cpu)
+static int uv_heartbeat_disable(unsigned int cpu)
 {
        if (uv_cpu_scir_info(cpu)->enabled) {
                uv_cpu_scir_info(cpu)->enabled = 0;
                del_timer(&uv_cpu_scir_info(cpu)->timer);
        }
        uv_set_cpu_scir_bits(cpu, 0xff);
-}
-
-/*
- * cpu hotplug notifier
- */
-static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
-                             void *hcpu)
-{
-       long cpu = (long)hcpu;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_DOWN_FAILED:
-       case CPU_ONLINE:
-               uv_heartbeat_enable(cpu);
-               break;
-       case CPU_DOWN_PREPARE:
-               uv_heartbeat_disable(cpu);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
+       return 0;
 }
 
 static __init void uv_scir_register_cpu_notifier(void)
 {
-       hotcpu_notifier(uv_scir_cpu_notify, 0);
+       cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/x2apic-uvx:online",
+                                 uv_heartbeat_enable, uv_heartbeat_disable);
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
index df04b2d..5ce5155 100644 (file)
@@ -558,55 +558,36 @@ static struct syscore_ops mc_syscore_ops = {
        .resume                 = mc_bp_resume,
 };
 
-static int
-mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+static int mc_cpu_online(unsigned int cpu)
 {
-       unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;
 
        dev = get_cpu_device(cpu);
+       microcode_update_cpu(cpu);
+       pr_debug("CPU%d added\n", cpu);
 
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_ONLINE:
-               microcode_update_cpu(cpu);
-               pr_debug("CPU%d added\n", cpu);
-               /*
-                * "break" is missing on purpose here because we want to fall
-                * through in order to create the sysfs group.
-                */
-
-       case CPU_DOWN_FAILED:
-               if (sysfs_create_group(&dev->kobj, &mc_attr_group))
-                       pr_err("Failed to create group for CPU%d\n", cpu);
-               break;
+       if (sysfs_create_group(&dev->kobj, &mc_attr_group))
+               pr_err("Failed to create group for CPU%d\n", cpu);
+       return 0;
+}
 
-       case CPU_DOWN_PREPARE:
-               /* Suspend is in progress, only remove the interface */
-               sysfs_remove_group(&dev->kobj, &mc_attr_group);
-               pr_debug("CPU%d removed\n", cpu);
-               break;
+static int mc_cpu_down_prep(unsigned int cpu)
+{
+       struct device *dev;
 
+       dev = get_cpu_device(cpu);
+       /* Suspend is in progress, only remove the interface */
+       sysfs_remove_group(&dev->kobj, &mc_attr_group);
+       pr_debug("CPU%d removed\n", cpu);
        /*
-        * case CPU_DEAD:
-        *
         * When a CPU goes offline, don't free up or invalidate the copy of
         * the microcode in kernel memory, so that we can reuse it when the
         * CPU comes back online without unnecessarily requesting the userspace
         * for it again.
         */
-       }
-
-       /* The CPU refused to come up during a system resume */
-       if (action == CPU_UP_CANCELED_FROZEN)
-               microcode_fini_cpu(cpu);
-
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block mc_cpu_notifier = {
-       .notifier_call  = mc_cpu_callback,
-};
-
 static struct attribute *cpu_root_microcode_attrs[] = {
        &dev_attr_reload.attr,
        NULL
@@ -665,7 +646,8 @@ int __init microcode_init(void)
                goto out_ucode_group;
 
        register_syscore_ops(&mc_syscore_ops);
-       register_hotcpu_notifier(&mc_cpu_notifier);
+       cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+                                 mc_cpu_online, mc_cpu_down_prep);
 
        pr_info("Microcode Update Driver: v" MICROCODE_VERSION
                " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
index 1726c4c..1f431f3 100644 (file)
@@ -423,12 +423,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
        kvm_spinlock_init();
 }
 
-static void kvm_guest_cpu_online(void *dummy)
-{
-       kvm_guest_cpu_init();
-}
-
-static void kvm_guest_cpu_offline(void *dummy)
+static void kvm_guest_cpu_offline(void)
 {
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -437,29 +432,21 @@ static void kvm_guest_cpu_offline(void *dummy)
        apf_task_wake_all();
 }
 
-static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
-                         void *hcpu)
+static int kvm_cpu_online(unsigned int cpu)
 {
-       int cpu = (unsigned long)hcpu;
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_DOWN_FAILED:
-       case CPU_ONLINE_FROZEN:
-               smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
-               break;
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
+       local_irq_disable();
+       kvm_guest_cpu_init();
+       local_irq_enable();
+       return 0;
 }
 
-static struct notifier_block kvm_cpu_notifier = {
-        .notifier_call  = kvm_cpu_notify,
-};
+static int kvm_cpu_down_prepare(unsigned int cpu)
+{
+       local_irq_disable();
+       kvm_guest_cpu_offline();
+       local_irq_enable();
+       return 0;
+}
 #endif
 
 static void __init kvm_apf_trap_init(void)
@@ -494,7 +481,9 @@ void __init kvm_guest_init(void)
 
 #ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-       register_cpu_notifier(&kvm_cpu_notifier);
+       if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
+                                     kvm_cpu_online, kvm_cpu_down_prepare) < 0)
+               pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
 #else
        kvm_guest_cpu_init();
 #endif
index 53b1737..96631e6 100644 (file)
@@ -78,30 +78,21 @@ static int raise_blk_irq(int cpu, struct request *rq)
 }
 #endif
 
-static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
-                         void *hcpu)
+static int blk_softirq_cpu_dead(unsigned int cpu)
 {
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               int cpu = (unsigned long) hcpu;
-
-               local_irq_disable();
-               list_splice_init(&per_cpu(blk_cpu_done, cpu),
-                                this_cpu_ptr(&blk_cpu_done));
-               raise_softirq_irqoff(BLOCK_SOFTIRQ);
-               local_irq_enable();
-       }
+       local_irq_disable();
+       list_splice_init(&per_cpu(blk_cpu_done, cpu),
+                        this_cpu_ptr(&blk_cpu_done));
+       raise_softirq_irqoff(BLOCK_SOFTIRQ);
+       local_irq_enable();
 
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block blk_cpu_notifier = {
-       .notifier_call  = blk_cpu_notify,
-};
-
 void __blk_complete_request(struct request *req)
 {
        int ccpu, cpu;
@@ -180,7 +171,9 @@ static __init int blk_softirq_init(void)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
-       register_hotcpu_notifier(&blk_cpu_notifier);
+       cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
+                                 "block/softirq:dead", NULL,
+                                 blk_softirq_cpu_dead);
        return 0;
 }
 subsys_initcall(blk_softirq_init);
index 0553aee..13e5ac4 100644 (file)
@@ -110,55 +110,46 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
 
 static int __acpi_processor_start(struct acpi_device *device);
 
-static int acpi_cpu_soft_notify(struct notifier_block *nfb,
-                                         unsigned long action, void *hcpu)
+static int acpi_soft_cpu_online(unsigned int cpu)
 {
-       unsigned int cpu = (unsigned long)hcpu;
        struct acpi_processor *pr = per_cpu(processors, cpu);
        struct acpi_device *device;
-       action &= ~CPU_TASKS_FROZEN;
-
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_DEAD:
-               break;
-       default:
-               return NOTIFY_DONE;
-       }
 
        if (!pr || acpi_bus_get_device(pr->handle, &device))
-               return NOTIFY_DONE;
-
-       if (action == CPU_ONLINE) {
-               /*
-                * CPU got physically hotplugged and onlined for the first time:
-                * Initialize missing things.
-                */
-               if (pr->flags.need_hotplug_init) {
-                       int ret;
-
-                       pr_info("Will online and init hotplugged CPU: %d\n",
-                               pr->id);
-                       pr->flags.need_hotplug_init = 0;
-                       ret = __acpi_processor_start(device);
-                       WARN(ret, "Failed to start CPU: %d\n", pr->id);
-               } else {
-                       /* Normal CPU soft online event. */
-                       acpi_processor_ppc_has_changed(pr, 0);
-                       acpi_processor_hotplug(pr);
-                       acpi_processor_reevaluate_tstate(pr, action);
-                       acpi_processor_tstate_has_changed(pr);
-               }
-       } else if (action == CPU_DEAD) {
-               /* Invalidate flag.throttling after the CPU is offline. */
-               acpi_processor_reevaluate_tstate(pr, action);
+               return 0;
+       /*
+        * CPU got physically hotplugged and onlined for the first time:
+        * Initialize missing things.
+        */
+       if (pr->flags.need_hotplug_init) {
+               int ret;
+
+               pr_info("Will online and init hotplugged CPU: %d\n",
+                       pr->id);
+               pr->flags.need_hotplug_init = 0;
+               ret = __acpi_processor_start(device);
+               WARN(ret, "Failed to start CPU: %d\n", pr->id);
+       } else {
+               /* Normal CPU soft online event. */
+               acpi_processor_ppc_has_changed(pr, 0);
+               acpi_processor_hotplug(pr);
+               acpi_processor_reevaluate_tstate(pr, false);
+               acpi_processor_tstate_has_changed(pr);
        }
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block acpi_cpu_notifier = {
-           .notifier_call = acpi_cpu_soft_notify,
-};
+static int acpi_soft_cpu_dead(unsigned int cpu)
+{
+       struct acpi_processor *pr = per_cpu(processors, cpu);
+       struct acpi_device *device;
+
+       if (!pr || acpi_bus_get_device(pr->handle, &device))
+               return 0;
+
+       acpi_processor_reevaluate_tstate(pr, true);
+       return 0;
+}
 
 #ifdef CONFIG_ACPI_CPU_FREQ_PSS
 static int acpi_pss_perf_init(struct acpi_processor *pr,
@@ -303,7 +294,7 @@ static int acpi_processor_stop(struct device *dev)
  * This is needed for the powernow-k8 driver, that works even without
  * ACPI, but needs symbols from this driver
  */
-
+static enum cpuhp_state hp_online;
 static int __init acpi_processor_driver_init(void)
 {
        int result = 0;
@@ -315,11 +306,22 @@ static int __init acpi_processor_driver_init(void)
        if (result < 0)
                return result;
 
-       register_hotcpu_notifier(&acpi_cpu_notifier);
+       result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                          "acpi/cpu-drv:online",
+                                          acpi_soft_cpu_online, NULL);
+       if (result < 0)
+               goto err;
+       hp_online = result;
+       cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
+                                 NULL, acpi_soft_cpu_dead);
+
        acpi_thermal_cpufreq_init();
        acpi_processor_ppc_init();
        acpi_processor_throttling_init();
        return 0;
+err:
+       driver_unregister(&acpi_processor_driver);
+       return result;
 }
 
 static void __exit acpi_processor_driver_exit(void)
@@ -329,7 +331,8 @@ static void __exit acpi_processor_driver_exit(void)
 
        acpi_processor_ppc_exit();
        acpi_thermal_cpufreq_exit();
-       unregister_hotcpu_notifier(&acpi_cpu_notifier);
+       cpuhp_remove_state_nocalls(hp_online);
+       cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
        driver_unregister(&acpi_processor_driver);
 }
 
index c72e648..d51ca1c 100644 (file)
@@ -375,11 +375,11 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
  *     3. TSD domain
  */
 void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
-                                       unsigned long action)
+                                       bool is_dead)
 {
        int result = 0;
 
-       if (action == CPU_DEAD) {
+       if (is_dead) {
                /* When one CPU is offline, the T-state throttling
                 * will be invalidated.
                 */
index 5755907..4c44ba2 100644 (file)
@@ -144,15 +144,12 @@ struct cci_pmu {
        int num_cntrs;
        atomic_t active_events;
        struct mutex reserve_mutex;
-       struct list_head entry;
+       struct hlist_node node;
        cpumask_t cpus;
 };
 
 #define to_cci_pmu(c)  (container_of(c, struct cci_pmu, pmu))
 
-static DEFINE_MUTEX(cci_pmu_mutex);
-static LIST_HEAD(cci_pmu_list);
-
 enum cci_models {
 #ifdef CONFIG_ARM_CCI400_PMU
        CCI400_R0,
@@ -1506,25 +1503,21 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
        return perf_pmu_register(&cci_pmu->pmu, name, -1);
 }
 
-static int cci_pmu_offline_cpu(unsigned int cpu)
+static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 {
-       struct cci_pmu *cci_pmu;
+       struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
        unsigned int target;
 
-       mutex_lock(&cci_pmu_mutex);
-       list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
-               if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-                       continue;
-               target = cpumask_any_but(cpu_online_mask, cpu);
-               if (target >= nr_cpu_ids)
-                       continue;
-               /*
-                * TODO: migrate context once core races on event->ctx have
-                * been fixed.
-                */
-               cpumask_set_cpu(target, &cci_pmu->cpus);
-       }
-       mutex_unlock(&cci_pmu_mutex);
+       if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
+               return 0;
+       target = cpumask_any_but(cpu_online_mask, cpu);
+       if (target >= nr_cpu_ids)
+               return 0;
+       /*
+        * TODO: migrate context once core races on event->ctx have
+        * been fixed.
+        */
+       cpumask_set_cpu(target, &cci_pmu->cpus);
        return 0;
 }
 
@@ -1768,10 +1761,8 @@ static int cci_pmu_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       mutex_lock(&cci_pmu_mutex);
-       list_add(&cci_pmu->entry, &cci_pmu_list);
-       mutex_unlock(&cci_pmu_mutex);
-
+       cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+                                        &cci_pmu->node);
        pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
        return 0;
 }
@@ -1804,9 +1795,9 @@ static int __init cci_platform_init(void)
 {
        int ret;
 
-       ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
-                                       "AP_PERF_ARM_CCI_ONLINE", NULL,
-                                       cci_pmu_offline_cpu);
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+                                     "AP_PERF_ARM_CCI_ONLINE", NULL,
+                                     cci_pmu_offline_cpu);
        if (ret)
                return ret;
 
index 97a9185..e0ad475 100644 (file)
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
        struct hrtimer hrtimer;
 
        cpumask_t cpu;
-       struct list_head entry;
+       struct hlist_node node;
 
        struct pmu pmu;
 };
@@ -189,9 +189,6 @@ struct arm_ccn {
        struct arm_ccn_dt dt;
 };
 
-static DEFINE_MUTEX(arm_ccn_mutex);
-static LIST_HEAD(arm_ccn_list);
-
 static int arm_ccn_node_to_xp(int node)
 {
        return node / CCN_NUM_XP_PORTS;
@@ -1173,30 +1170,24 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
 }
 
 
-static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 {
-       struct arm_ccn_dt *dt;
+       struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
+       struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
        unsigned int target;
 
-       mutex_lock(&arm_ccn_mutex);
-       list_for_each_entry(dt, &arm_ccn_list, entry) {
-               struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
-
-               if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
-                       continue;
-               target = cpumask_any_but(cpu_online_mask, cpu);
-               if (target >= nr_cpu_ids)
-                       continue;
-               perf_pmu_migrate_context(&dt->pmu, cpu, target);
-               cpumask_set_cpu(target, &dt->cpu);
-               if (ccn->irq)
-                       WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
-       }
-       mutex_unlock(&arm_ccn_mutex);
+       if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
+               return 0;
+       target = cpumask_any_but(cpu_online_mask, cpu);
+       if (target >= nr_cpu_ids)
+               return 0;
+       perf_pmu_migrate_context(&dt->pmu, cpu, target);
+       cpumask_set_cpu(target, &dt->cpu);
+       if (ccn->irq)
+               WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
        return 0;
 }
 
-
 static DEFINE_IDA(arm_ccn_pmu_ida);
 
 static int arm_ccn_pmu_init(struct arm_ccn *ccn)
@@ -1278,9 +1269,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
        if (err)
                goto error_pmu_register;
 
-       mutex_lock(&arm_ccn_mutex);
-       list_add(&ccn->dt.entry, &arm_ccn_list);
-       mutex_unlock(&arm_ccn_mutex);
+       cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+                                        &ccn->dt.node);
        return 0;
 
 error_pmu_register:
@@ -1296,10 +1286,8 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 {
        int i;
 
-       mutex_lock(&arm_ccn_mutex);
-       list_del(&ccn->dt.entry);
-       mutex_unlock(&arm_ccn_mutex);
-
+       cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+                                           &ccn->dt.node);
        if (ccn->irq)
                irq_set_affinity_hint(ccn->irq, NULL);
        for (i = 0; i < ccn->num_xps; i++)
@@ -1527,9 +1515,9 @@ static int __init arm_ccn_init(void)
 {
        int i, ret;
 
-       ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
-                                       "AP_PERF_ARM_CCN_ONLINE", NULL,
-                                       arm_ccn_pmu_offline_cpu);
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+                                     "AP_PERF_ARM_CCN_ONLINE", NULL,
+                                     arm_ccn_pmu_offline_cpu);
        if (ret)
                return ret;
 
@@ -1541,7 +1529,7 @@ static int __init arm_ccn_init(void)
 
 static void __exit arm_ccn_exit(void)
 {
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+       cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
        platform_driver_unregister(&arm_ccn_driver);
 }
 
index cad49bc..1b14256 100644 (file)
@@ -596,19 +596,20 @@ BUILD_PERDEV_HELPER(cpu_down)       /* int mips_cdmm_cpu_down_helper(...) */
 BUILD_PERDEV_HELPER(cpu_up)         /* int mips_cdmm_cpu_up_helper(...) */
 
 /**
- * mips_cdmm_bus_down() - Tear down the CDMM bus.
- * @data:      Pointer to unsigned int CPU number.
+ * mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
+ *                            Tear down the CDMM bus.
+ * @cpu:       unsigned int CPU number.
  *
  * This function is executed on the hotplugged CPU and calls the CDMM
  * driver cpu_down callback for all devices on that CPU.
  */
-static long mips_cdmm_bus_down(void *data)
+static int mips_cdmm_cpu_down_prep(unsigned int cpu)
 {
        struct mips_cdmm_bus *bus;
        long ret;
 
        /* Inform all the devices on the bus */
-       ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+       ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
                               mips_cdmm_cpu_down_helper);
 
        /*
@@ -623,8 +624,8 @@ static long mips_cdmm_bus_down(void *data)
 }
 
 /**
- * mips_cdmm_bus_up() - Bring up the CDMM bus.
- * @data:      Pointer to unsigned int CPU number.
+ * mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
+ * @cpu:       unsigned int CPU number.
  *
  * This work_on_cpu callback function is executed on a given CPU to discover
  * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
@@ -634,7 +635,7 @@ static long mips_cdmm_bus_down(void *data)
  * initialisation. When CPUs are brought online the function is
  * invoked directly on the hotplugged CPU.
  */
-static long mips_cdmm_bus_up(void *data)
+static int mips_cdmm_cpu_online(unsigned int cpu)
 {
        struct mips_cdmm_bus *bus;
        long ret;
@@ -651,50 +652,12 @@ static long mips_cdmm_bus_up(void *data)
                mips_cdmm_bus_discover(bus);
        else
                /* Inform all the devices on the bus */
-               ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+               ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
                                       mips_cdmm_cpu_up_helper);
 
        return ret;
 }
 
-/**
- * mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline.
- * @nb:                CPU notifier block .
- * @action:    Event that has taken place (CPU_*).
- * @data:      CPU number.
- *
- * This notifier is used to keep the CDMM buses updated as CPUs are offlined and
- * onlined. When CPUs go offline or come back online, so does their CDMM bus, so
- * devices must be informed. Also when CPUs come online for the first time the
- * devices on the CDMM bus need discovering.
- *
- * Returns:    NOTIFY_OK if event was used.
- *             NOTIFY_DONE if we didn't care.
- */
-static int mips_cdmm_cpu_notify(struct notifier_block *nb,
-                               unsigned long action, void *data)
-{
-       unsigned int cpu = (unsigned int)data;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_ONLINE:
-       case CPU_DOWN_FAILED:
-               mips_cdmm_bus_up(&cpu);
-               break;
-       case CPU_DOWN_PREPARE:
-               mips_cdmm_bus_down(&cpu);
-               break;
-       default:
-               return NOTIFY_DONE;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block mips_cdmm_cpu_nb = {
-       .notifier_call = mips_cdmm_cpu_notify,
-};
-
 /**
  * mips_cdmm_init() - Initialise CDMM bus.
  *
@@ -703,7 +666,6 @@ static struct notifier_block mips_cdmm_cpu_nb = {
  */
 static int __init mips_cdmm_init(void)
 {
-       unsigned int cpu;
        int ret;
 
        /* Register the bus */
@@ -712,19 +674,11 @@ static int __init mips_cdmm_init(void)
                return ret;
 
        /* We want to be notified about new CPUs */
-       ret = register_cpu_notifier(&mips_cdmm_cpu_nb);
-       if (ret) {
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
+                               mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
+       if (ret < 0)
                pr_warn("cdmm: Failed to register CPU notifier\n");
-               goto out;
-       }
-
-       /* Discover devices on CDMM of online CPUs */
-       for_each_online_cpu(cpu)
-               work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
 
-       return 0;
-out:
-       bus_unregister(&mips_cdmm_bustype);
        return ret;
 }
 subsys_initcall(mips_cdmm_init);
index 3dd4884..8b44de4 100644 (file)
@@ -1358,7 +1358,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        return add_cpu_dev_symlink(policy, cpu);
 }
 
-static void cpufreq_offline(unsigned int cpu)
+static int cpufreq_offline(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
        int ret;
@@ -1368,7 +1368,7 @@ static void cpufreq_offline(unsigned int cpu)
        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
-               return;
+               return 0;
        }
 
        down_write(&policy->rwsem);
@@ -1417,6 +1417,7 @@ static void cpufreq_offline(unsigned int cpu)
 
 unlock:
        up_write(&policy->rwsem);
+       return 0;
 }
 
 /**
@@ -2332,28 +2333,6 @@ unlock:
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
-                                       unsigned long action, void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_ONLINE:
-       case CPU_DOWN_FAILED:
-               cpufreq_online(cpu);
-               break;
-
-       case CPU_DOWN_PREPARE:
-               cpufreq_offline(cpu);
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block __refdata cpufreq_cpu_notifier = {
-       .notifier_call = cpufreq_cpu_callback,
-};
-
 /*********************************************************************
  *               BOOST                                              *
  *********************************************************************/
@@ -2455,6 +2434,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
  *********************************************************************/
+static enum cpuhp_state hp_online;
 
 /**
  * cpufreq_register_driver - register a CPU Frequency driver
@@ -2517,7 +2497,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                goto err_if_unreg;
        }
 
-       register_hotcpu_notifier(&cpufreq_cpu_notifier);
+       ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
+                                       cpufreq_online,
+                                       cpufreq_offline);
+       if (ret < 0)
+               goto err_if_unreg;
+       hp_online = ret;
+       ret = 0;
+
        pr_debug("driver %s up and running\n", driver_data->name);
        goto out;
 
@@ -2556,7 +2543,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
        get_online_cpus();
        subsys_interface_unregister(&cpufreq_interface);
        remove_boost_sysfs_file();
-       unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
+       cpuhp_remove_state_nocalls(hp_online);
 
        write_lock_irqsave(&cpufreq_driver_lock, flags);
 
index d5657d5..71e586d 100644 (file)
@@ -749,65 +749,52 @@ static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
        put_cpu();
 }
 
-/**
- * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
- * @nb: notifier block
- * @action: hotplug transition
- * @hcpu: target cpu number
- *
- * Called when a cpu is brought on or offline using hotplug.  Updates the
- * coupled cpu set appropriately
- */
-static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
-               unsigned long action, void *hcpu)
+static int coupled_cpu_online(unsigned int cpu)
 {
-       int cpu = (unsigned long)hcpu;
        struct cpuidle_device *dev;
 
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-       case CPU_DOWN_PREPARE:
-       case CPU_ONLINE:
-       case CPU_DEAD:
-       case CPU_UP_CANCELED:
-       case CPU_DOWN_FAILED:
-               break;
-       default:
-               return NOTIFY_OK;
-       }
-
        mutex_lock(&cpuidle_lock);
 
        dev = per_cpu(cpuidle_devices, cpu);
-       if (!dev || !dev->coupled)
-               goto out;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-       case CPU_DOWN_PREPARE:
-               cpuidle_coupled_prevent_idle(dev->coupled);
-               break;
-       case CPU_ONLINE:
-       case CPU_DEAD:
+       if (dev && dev->coupled) {
                cpuidle_coupled_update_online_cpus(dev->coupled);
-               /* Fall through */
-       case CPU_UP_CANCELED:
-       case CPU_DOWN_FAILED:
                cpuidle_coupled_allow_idle(dev->coupled);
-               break;
        }
 
-out:
        mutex_unlock(&cpuidle_lock);
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block cpuidle_coupled_cpu_notifier = {
-       .notifier_call = cpuidle_coupled_cpu_notify,
-};
+static int coupled_cpu_up_prepare(unsigned int cpu)
+{
+       struct cpuidle_device *dev;
+
+       mutex_lock(&cpuidle_lock);
+
+       dev = per_cpu(cpuidle_devices, cpu);
+       if (dev && dev->coupled)
+               cpuidle_coupled_prevent_idle(dev->coupled);
+
+       mutex_unlock(&cpuidle_lock);
+       return 0;
+}
 
 static int __init cpuidle_coupled_init(void)
 {
-       return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+       int ret;
+
+       ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE,
+                                       "cpuidle/coupled:prepare",
+                                       coupled_cpu_up_prepare,
+                                       coupled_cpu_online);
+       if (ret)
+               return ret;
+       ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                       "cpuidle/coupled:online",
+                                       coupled_cpu_online,
+                                       coupled_cpu_up_prepare);
+       if (ret < 0)
+               cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE);
+       return ret;
 }
 core_initcall(cpuidle_coupled_init);
index f7ca891..7fe442c 100644 (file)
@@ -119,40 +119,30 @@ static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
                .enter = snooze_loop },
 };
 
-static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
-                       unsigned long action, void *hcpu)
+static int powernv_cpuidle_cpu_online(unsigned int cpu)
 {
-       int hotcpu = (unsigned long)hcpu;
-       struct cpuidle_device *dev =
-                               per_cpu(cpuidle_devices, hotcpu);
+       struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
 
        if (dev && cpuidle_get_driver()) {
-               switch (action) {
-               case CPU_ONLINE:
-               case CPU_ONLINE_FROZEN:
-                       cpuidle_pause_and_lock();
-                       cpuidle_enable_device(dev);
-                       cpuidle_resume_and_unlock();
-                       break;
+               cpuidle_pause_and_lock();
+               cpuidle_enable_device(dev);
+               cpuidle_resume_and_unlock();
+       }
+       return 0;
+}
 
-               case CPU_DEAD:
-               case CPU_DEAD_FROZEN:
-                       cpuidle_pause_and_lock();
-                       cpuidle_disable_device(dev);
-                       cpuidle_resume_and_unlock();
-                       break;
+static int powernv_cpuidle_cpu_dead(unsigned int cpu)
+{
+       struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
 
-               default:
-                       return NOTIFY_DONE;
-               }
+       if (dev && cpuidle_get_driver()) {
+               cpuidle_pause_and_lock();
+               cpuidle_disable_device(dev);
+               cpuidle_resume_and_unlock();
        }
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block setup_hotplug_notifier = {
-       .notifier_call = powernv_cpuidle_add_cpu_notifier,
-};
-
 /*
  * powernv_cpuidle_driver_init()
  */
@@ -355,7 +345,14 @@ static int __init powernv_processor_idle_init(void)
                return retval;
        }
 
-       register_cpu_notifier(&setup_hotplug_notifier);
+       retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                          "cpuidle/powernv:online",
+                                          powernv_cpuidle_cpu_online, NULL);
+       WARN_ON(retval < 0);
+       retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
+                                          "cpuidle/powernv:dead", NULL,
+                                          powernv_cpuidle_cpu_dead);
+       WARN_ON(retval < 0);
        printk(KERN_DEBUG "powernv_idle_driver registered\n");
        return 0;
 }
index 07135e0..166ccd7 100644 (file)
@@ -171,40 +171,30 @@ static struct cpuidle_state shared_states[] = {
                .enter = &shared_cede_loop },
 };
 
-static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
-                       unsigned long action, void *hcpu)
+static int pseries_cpuidle_cpu_online(unsigned int cpu)
 {
-       int hotcpu = (unsigned long)hcpu;
-       struct cpuidle_device *dev =
-                               per_cpu(cpuidle_devices, hotcpu);
+       struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
 
        if (dev && cpuidle_get_driver()) {
-               switch (action) {
-               case CPU_ONLINE:
-               case CPU_ONLINE_FROZEN:
-                       cpuidle_pause_and_lock();
-                       cpuidle_enable_device(dev);
-                       cpuidle_resume_and_unlock();
-                       break;
+               cpuidle_pause_and_lock();
+               cpuidle_enable_device(dev);
+               cpuidle_resume_and_unlock();
+       }
+       return 0;
+}
 
-               case CPU_DEAD:
-               case CPU_DEAD_FROZEN:
-                       cpuidle_pause_and_lock();
-                       cpuidle_disable_device(dev);
-                       cpuidle_resume_and_unlock();
-                       break;
+static int pseries_cpuidle_cpu_dead(unsigned int cpu)
+{
+       struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
 
-               default:
-                       return NOTIFY_DONE;
-               }
+       if (dev && cpuidle_get_driver()) {
+               cpuidle_pause_and_lock();
+               cpuidle_disable_device(dev);
+               cpuidle_resume_and_unlock();
        }
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block setup_hotplug_notifier = {
-       .notifier_call = pseries_cpuidle_add_cpu_notifier,
-};
-
 /*
  * pseries_cpuidle_driver_init()
  */
@@ -273,7 +263,14 @@ static int __init pseries_processor_idle_init(void)
                return retval;
        }
 
-       register_cpu_notifier(&setup_hotplug_notifier);
+       retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                          "cpuidle/pseries:online",
+                                          pseries_cpuidle_cpu_online, NULL);
+       WARN_ON(retval < 0);
+       retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
+                                          "cpuidle/pseries:DEAD", NULL,
+                                          pseries_cpuidle_cpu_dead);
+       WARN_ON(retval < 0);
        printk(KERN_DEBUG "pseries_idle_driver registered\n");
        return 0;
 }
index 8912407..aae8064 100644 (file)
@@ -6330,22 +6330,20 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
        return 0;
 }
 
-static void raid5_free_percpu(struct r5conf *conf)
+static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
 {
-       unsigned long cpu;
+       struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
+
+       free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+       return 0;
+}
 
+static void raid5_free_percpu(struct r5conf *conf)
+{
        if (!conf->percpu)
                return;
 
-#ifdef CONFIG_HOTPLUG_CPU
-       unregister_cpu_notifier(&conf->cpu_notify);
-#endif
-
-       get_online_cpus();
-       for_each_possible_cpu(cpu)
-               free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
-       put_online_cpus();
-
+       cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
        free_percpu(conf->percpu);
 }
 
@@ -6364,64 +6362,28 @@ static void free_conf(struct r5conf *conf)
        kfree(conf);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
-                             void *hcpu)
+static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
 {
-       struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
-       long cpu = (long)hcpu;
+       struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
        struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
 
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               if (alloc_scratch_buffer(conf, percpu)) {
-                       pr_err("%s: failed memory allocation for cpu%ld\n",
-                              __func__, cpu);
-                       return notifier_from_errno(-ENOMEM);
-               }
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-               free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
-               break;
-       default:
-               break;
+       if (alloc_scratch_buffer(conf, percpu)) {
+               pr_err("%s: failed memory allocation for cpu%u\n",
+                      __func__, cpu);
+               return -ENOMEM;
        }
-       return NOTIFY_OK;
+       return 0;
 }
-#endif
 
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
-       unsigned long cpu;
        int err = 0;
 
        conf->percpu = alloc_percpu(struct raid5_percpu);
        if (!conf->percpu)
                return -ENOMEM;
 
-#ifdef CONFIG_HOTPLUG_CPU
-       conf->cpu_notify.notifier_call = raid456_cpu_notify;
-       conf->cpu_notify.priority = 0;
-       err = register_cpu_notifier(&conf->cpu_notify);
-       if (err)
-               return err;
-#endif
-
-       get_online_cpus();
-       for_each_present_cpu(cpu) {
-               err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
-               if (err) {
-                       pr_err("%s: failed memory allocation for cpu%ld\n",
-                              __func__, cpu);
-                       break;
-               }
-       }
-       put_online_cpus();
-
+       err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
        if (!err) {
                conf->scribble_disks = max(conf->raid_disks,
                        conf->previous_raid_disks);
@@ -7953,10 +7915,21 @@ static struct md_personality raid4_personality =
 
 static int __init raid5_init(void)
 {
+       int ret;
+
        raid5_wq = alloc_workqueue("raid5wq",
                WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
        if (!raid5_wq)
                return -ENOMEM;
+
+       ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
+                                     "md/raid5:prepare",
+                                     raid456_cpu_up_prepare,
+                                     raid456_cpu_dead);
+       if (ret) {
+               destroy_workqueue(raid5_wq);
+               return ret;
+       }
        register_md_personality(&raid6_personality);
        register_md_personality(&raid5_personality);
        register_md_personality(&raid4_personality);
@@ -7968,6 +7941,7 @@ static void raid5_exit(void)
        unregister_md_personality(&raid6_personality);
        unregister_md_personality(&raid5_personality);
        unregister_md_personality(&raid4_personality);
+       cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
        destroy_workqueue(raid5_wq);
 }
 
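
The raid5 hunks above use the multi-instance flavour of the same API: the
hlist_node is embedded in the per-array r5conf, cpuhp_setup_state_multi() is
called once at module init, and cpuhp_state_add_instance() replaces the old
register_cpu_notifier() plus for_each_present_cpu() loop by invoking the
prepare callback for every CPU that has already reached the state. A reduced
sketch, with a hypothetical foo_dev and CPUHP_FOO_PREPARE standing in for a
real enum entry:

    #include <linux/cpuhotplug.h>
    #include <linux/errno.h>
    #include <linux/list.h>

    struct foo_dev {
            struct hlist_node node; /* links this device into the state's list */
            /* per-device data ... */
    };

    static int foo_prepare_cpu(unsigned int cpu, struct hlist_node *node)
    {
            struct foo_dev *d = hlist_entry_safe(node, struct foo_dev, node);

            if (!d)
                    return -EINVAL;
            /* allocate per-cpu scratch for @d; a negative return aborts bringup */
            return 0;
    }

    static int foo_add_device(struct foo_dev *d)
    {
            /* runs foo_prepare_cpu(cpu, &d->node) for each CPU already up */
            return cpuhp_state_add_instance(CPUHP_FOO_PREPARE, &d->node);
    }
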
index 517d4b6..57ec49f 100644 (file)
@@ -512,9 +512,7 @@ struct r5conf {
        } __percpu *percpu;
        int scribble_disks;
        int scribble_sectors;
-#ifdef CONFIG_HOTPLUG_CPU
-       struct notifier_block   cpu_notify;
-#endif
+       struct hlist_node node;
 
        /*
         * Free stripes pool
index d41c28d..b745487 100644 (file)
@@ -382,7 +382,8 @@ struct mvneta_port {
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
-       struct notifier_block cpu_notifier;
+       struct hlist_node node_online;
+       struct hlist_node node_dead;
        int rxq_def;
        /* Protect the access to the percpu interrupt registers,
         * ensuring that the configuration remains coherent.
@@ -574,6 +575,7 @@ struct mvneta_rx_queue {
        int next_desc_to_proc;
 };
 
+static enum cpuhp_state online_hpstate;
 /* The hardware supports eight (8) rx queues, but we are only allowing
  * the first one to be used. Therefore, let's just allocate one queue.
  */
@@ -3311,101 +3313,104 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
        }
 };
 
-static int mvneta_percpu_notifier(struct notifier_block *nfb,
-                                 unsigned long action, void *hcpu)
+static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
-       struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
-                                             cpu_notifier);
-       int cpu = (unsigned long)hcpu, other_cpu;
+       int other_cpu;
+       struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+                                                 node_online);
        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-               spin_lock(&pp->lock);
-               /* Configuring the driver for a new CPU while the
-                * driver is stopping is racy, so just avoid it.
-                */
-               if (pp->is_stopped) {
-                       spin_unlock(&pp->lock);
-                       break;
-               }
-               netif_tx_stop_all_queues(pp->dev);
 
-               /* We have to synchronise on tha napi of each CPU
-                * except the one just being waked up
-                */
-               for_each_online_cpu(other_cpu) {
-                       if (other_cpu != cpu) {
-                               struct mvneta_pcpu_port *other_port =
-                                       per_cpu_ptr(pp->ports, other_cpu);
+       spin_lock(&pp->lock);
+       /*
+        * Configuring the driver for a new CPU while the driver is
+        * stopping is racy, so just avoid it.
+        */
+       if (pp->is_stopped) {
+               spin_unlock(&pp->lock);
+               return 0;
+       }
+       netif_tx_stop_all_queues(pp->dev);
 
-                               napi_synchronize(&other_port->napi);
-                       }
+       /*
+        * We have to synchronise on the napi of each CPU except the one
+        * just being woken up.
+        */
+       for_each_online_cpu(other_cpu) {
+               if (other_cpu != cpu) {
+                       struct mvneta_pcpu_port *other_port =
+                               per_cpu_ptr(pp->ports, other_cpu);
+
+                       napi_synchronize(&other_port->napi);
                }
+       }
 
-               /* Mask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
-               napi_enable(&port->napi);
+       /* Mask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+       napi_enable(&port->napi);
 
+       /*
+        * Enable per-CPU interrupts on the CPU that is
+        * brought up.
+        */
+       mvneta_percpu_enable(pp);
 
-               /* Enable per-CPU interrupts on the CPU that is
-                * brought up.
-                */
-               mvneta_percpu_enable(pp);
+       /*
+        * Enable per-CPU interrupt on the one CPU we care
+        * about.
+        */
+       mvneta_percpu_elect(pp);
 
-               /* Enable per-CPU interrupt on the one CPU we care
-                * about.
-                */
-               mvneta_percpu_elect(pp);
-
-               /* Unmask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
-               mvreg_write(pp, MVNETA_INTR_MISC_MASK,
-                       MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                       MVNETA_CAUSE_LINK_CHANGE |
-                       MVNETA_CAUSE_PSC_SYNC_CHANGE);
-               netif_tx_start_all_queues(pp->dev);
-               spin_unlock(&pp->lock);
-               break;
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               netif_tx_stop_all_queues(pp->dev);
-               /* Thanks to this lock we are sure that any pending
-                * cpu election is done
-                */
-               spin_lock(&pp->lock);
-               /* Mask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
-               spin_unlock(&pp->lock);
+       /* Unmask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+                   MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                   MVNETA_CAUSE_LINK_CHANGE |
+                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
+       netif_tx_start_all_queues(pp->dev);
+       spin_unlock(&pp->lock);
+       return 0;
+}
 
-               napi_synchronize(&port->napi);
-               napi_disable(&port->napi);
-               /* Disable per-CPU interrupts on the CPU that is
-                * brought down.
-                */
-               mvneta_percpu_disable(pp);
+static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
+{
+       struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+                                                 node_online);
+       struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
 
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /* Check if a new CPU must be elected now this on is down */
-               spin_lock(&pp->lock);
-               mvneta_percpu_elect(pp);
-               spin_unlock(&pp->lock);
-               /* Unmask all ethernet port interrupts */
-               on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
-               mvreg_write(pp, MVNETA_INTR_MISC_MASK,
-                       MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                       MVNETA_CAUSE_LINK_CHANGE |
-                       MVNETA_CAUSE_PSC_SYNC_CHANGE);
-               netif_tx_start_all_queues(pp->dev);
-               break;
-       }
+       /*
+        * Thanks to this lock we are sure that any pending cpu election is
+        * done.
+        */
+       spin_lock(&pp->lock);
+       /* Mask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+       spin_unlock(&pp->lock);
 
-       return NOTIFY_OK;
+       napi_synchronize(&port->napi);
+       napi_disable(&port->napi);
+       /* Disable per-CPU interrupts on the CPU that is brought down. */
+       mvneta_percpu_disable(pp);
+       return 0;
+}
+
+static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+                                                 node_dead);
+
+       /* Check if a new CPU must be elected now this one is down */
+       spin_lock(&pp->lock);
+       mvneta_percpu_elect(pp);
+       spin_unlock(&pp->lock);
+       /* Unmask all ethernet port interrupts */
+       on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+                   MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                   MVNETA_CAUSE_LINK_CHANGE |
+                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
+       netif_tx_start_all_queues(pp->dev);
+       return 0;
 }
 
 static int mvneta_open(struct net_device *dev)
@@ -3442,7 +3447,15 @@ static int mvneta_open(struct net_device *dev)
        /* Register CPU hotplug callbacks to handle the case where our CPU
         * might be taken offline.
         */
-       register_cpu_notifier(&pp->cpu_notifier);
+       ret = cpuhp_state_add_instance_nocalls(online_hpstate,
+                                              &pp->node_online);
+       if (ret)
+               goto err_free_irq;
+
+       ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                              &pp->node_dead);
+       if (ret)
+               goto err_free_online_hp;
 
        /* In default link is down */
        netif_carrier_off(pp->dev);
@@ -3450,15 +3463,19 @@ static int mvneta_open(struct net_device *dev)
        ret = mvneta_mdio_probe(pp);
        if (ret < 0) {
                netdev_err(dev, "cannot probe MDIO bus\n");
-               goto err_free_irq;
+               goto err_free_dead_hp;
        }
 
        mvneta_start_dev(pp);
 
        return 0;
 
+err_free_dead_hp:
+       cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                           &pp->node_dead);
+err_free_online_hp:
+       cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
 err_free_irq:
-       unregister_cpu_notifier(&pp->cpu_notifier);
        on_each_cpu(mvneta_percpu_disable, pp, true);
        free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
@@ -3484,7 +3501,10 @@ static int mvneta_stop(struct net_device *dev)
 
        mvneta_stop_dev(pp);
        mvneta_mdio_remove(pp);
-       unregister_cpu_notifier(&pp->cpu_notifier);
+
+       cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
+       cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                           &pp->node_dead);
        on_each_cpu(mvneta_percpu_disable, pp, true);
        free_percpu_irq(dev->irq, pp->ports);
        mvneta_cleanup_rxqs(pp);
@@ -4024,7 +4044,6 @@ static int mvneta_probe(struct platform_device *pdev)
        err = of_property_read_string(dn, "managed", &managed);
        pp->use_inband_status = (err == 0 &&
                                 strcmp(managed, "in-band-status") == 0);
-       pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
 
        pp->rxq_def = rxq_def;
 
@@ -4227,7 +4246,42 @@ static struct platform_driver mvneta_driver = {
        },
 };
 
-module_platform_driver(mvneta_driver);
+static int __init mvneta_driver_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
+                                     mvneta_cpu_online,
+                                     mvneta_cpu_down_prepare);
+       if (ret < 0)
+               goto out;
+       online_hpstate = ret;
+       ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
+                                     NULL, mvneta_cpu_dead);
+       if (ret)
+               goto err_dead;
+
+       ret = platform_driver_register(&mvneta_driver);
+       if (ret)
+               goto err;
+       return 0;
+
+err:
+       cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+err_dead:
+       cpuhp_remove_multi_state(online_hpstate);
+out:
+       return ret;
+}
+module_init(mvneta_driver_init);
+
+static void __exit mvneta_driver_exit(void)
+{
+       platform_driver_unregister(&mvneta_driver);
+       cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+       cpuhp_remove_multi_state(online_hpstate);
+}
+module_exit(mvneta_driver_exit);
 
 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
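
Because CPUHP_AP_ONLINE_DYN hands out a slot at runtime, a driver must capture
the returned state number (online_hpstate above) to add instances later and to
remove the state again on exit. The resulting init/exit shape, sketched with
hypothetical foo_* callbacks and a foo_driver assumed to exist elsewhere; the
unwind order mirrors the setup order:

    #include <linux/cpuhotplug.h>
    #include <linux/platform_device.h>

    static enum cpuhp_state foo_online_state;

    static int __init foo_driver_init(void)
    {
            int ret;

            ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
                                          foo_cpu_online, foo_cpu_down_prepare);
            if (ret < 0)
                    return ret;
            foo_online_state = ret; /* the slot the core actually picked */

            ret = cpuhp_setup_state_multi(CPUHP_FOO_DEAD, "foo:dead",
                                          NULL, foo_cpu_dead);
            if (ret)
                    goto err_online;

            ret = platform_driver_register(&foo_driver);
            if (ret)
                    goto err_dead;
            return 0;

    err_dead:
            cpuhp_remove_multi_state(CPUHP_FOO_DEAD);
    err_online:
            cpuhp_remove_multi_state(foo_online_state);
            return ret;
    }
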
index 1b5f531..fad84f3 100644 (file)
@@ -138,8 +138,9 @@ struct virtnet_info {
        /* Is the affinity hint set for virtqueues? */
        bool affinity_hint_set;
 
-       /* CPU hot plug notifier */
-       struct notifier_block nb;
+       /* CPU hotplug instances for online & dead */
+       struct hlist_node node;
+       struct hlist_node node_dead;
 
        /* Control VQ buffers: protected by the rtnl lock */
        struct virtio_net_ctrl_hdr ctrl_hdr;
@@ -1237,25 +1238,53 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
        vi->affinity_hint_set = true;
 }
 
-static int virtnet_cpu_callback(struct notifier_block *nfb,
-                               unsigned long action, void *hcpu)
+static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
-       struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+       struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+                                                  node);
+       virtnet_set_affinity(vi);
+       return 0;
+}
 
-       switch(action & ~CPU_TASKS_FROZEN) {
-       case CPU_ONLINE:
-       case CPU_DOWN_FAILED:
-       case CPU_DEAD:
-               virtnet_set_affinity(vi);
-               break;
-       case CPU_DOWN_PREPARE:
-               virtnet_clean_affinity(vi, (long)hcpu);
-               break;
-       default:
-               break;
-       }
+static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+                                                  node_dead);
+       virtnet_set_affinity(vi);
+       return 0;
+}
 
-       return NOTIFY_OK;
+static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
+{
+       struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+                                                  node);
+
+       virtnet_clean_affinity(vi, cpu);
+       return 0;
+}
+
+static enum cpuhp_state virtionet_online;
+
+static int virtnet_cpu_notif_add(struct virtnet_info *vi)
+{
+       int ret;
+
+       ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
+       if (ret)
+               return ret;
+       ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
+                                              &vi->node_dead);
+       if (!ret)
+               return ret;
+       cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
+       return ret;
+}
+
+static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
+{
+       cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
+       cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
+                                           &vi->node_dead);
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
@@ -1879,8 +1908,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
-       vi->nb.notifier_call = &virtnet_cpu_callback;
-       err = register_hotcpu_notifier(&vi->nb);
+       err = virtnet_cpu_notif_add(vi);
        if (err) {
                pr_debug("virtio_net: registering cpu notifier failed\n");
                goto free_unregister_netdev;
@@ -1934,7 +1962,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
 
-       unregister_hotcpu_notifier(&vi->nb);
+       virtnet_cpu_notif_remove(vi);
 
        /* Make sure no work handler is accessing the device. */
        flush_work(&vi->config_work);
@@ -1953,7 +1981,7 @@ static int virtnet_freeze(struct virtio_device *vdev)
        struct virtnet_info *vi = vdev->priv;
        int i;
 
-       unregister_hotcpu_notifier(&vi->nb);
+       virtnet_cpu_notif_remove(vi);
 
        /* Make sure no work handler is accessing the device */
        flush_work(&vi->config_work);
@@ -1997,7 +2025,7 @@ static int virtnet_restore(struct virtio_device *vdev)
        virtnet_set_queues(vi, vi->curr_queue_pairs);
        rtnl_unlock();
 
-       err = register_hotcpu_notifier(&vi->nb);
+       err = virtnet_cpu_notif_add(vi);
        if (err)
                return err;
 
@@ -2039,7 +2067,41 @@ static struct virtio_driver virtio_net_driver = {
 #endif
 };
 
-module_virtio_driver(virtio_net_driver);
+static __init int virtio_net_driver_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
+                                     virtnet_cpu_online,
+                                     virtnet_cpu_down_prep);
+       if (ret < 0)
+               goto out;
+       virtionet_online = ret;
+       ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
+                                     NULL, virtnet_cpu_dead);
+       if (ret)
+               goto err_dead;
+
+       ret = register_virtio_driver(&virtio_net_driver);
+       if (ret)
+               goto err_virtio;
+       return 0;
+err_virtio:
+       cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
+err_dead:
+       cpuhp_remove_multi_state(virtionet_online);
+out:
+       return ret;
+}
+module_init(virtio_net_driver_init);
+
+static __exit void virtio_net_driver_exit(void)
+{
+       cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
+       cpuhp_remove_multi_state(virtionet_online);
+       unregister_virtio_driver(&virtio_net_driver);
+}
+module_exit(virtio_net_driver_exit);
 
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio network driver");
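
One detail of the helpers above: the _nocalls variants link or unlink an
instance without invoking its callbacks on CPUs that are already online,
which fits here since the driver re-applies the affinity hints itself during
probe and restore. When the callbacks should fire immediately for every
online CPU, the plain variants are used instead (as virtio_scsi does further
below). A sketch of a paired add helper, with hypothetical foo_* names:

    /* fi->node joins an online state, fi->node_dead a dead state; failure
     * to join the second unwinds the first. */
    static int foo_notif_add(struct foo_info *fi)
    {
            int ret;

            ret = cpuhp_state_add_instance_nocalls(foo_online_state, &fi->node);
            if (ret)
                    return ret;
            ret = cpuhp_state_add_instance_nocalls(CPUHP_FOO_DEAD,
                                                   &fi->node_dead);
            if (ret)
                    cpuhp_state_remove_instance_nocalls(foo_online_state,
                                                        &fi->node);
            return ret;
    }
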
index bdef916..2498a6c 100644 (file)
@@ -74,37 +74,39 @@ static void oprofile_hrtimer_stop(void)
        put_online_cpus();
 }
 
-static int oprofile_cpu_notify(struct notifier_block *self,
-                              unsigned long action, void *hcpu)
+static int oprofile_timer_online(unsigned int cpu)
 {
-       long cpu = (long) hcpu;
-
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               smp_call_function_single(cpu, __oprofile_hrtimer_start,
-                                        NULL, 1);
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               __oprofile_hrtimer_stop(cpu);
-               break;
-       }
-       return NOTIFY_OK;
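+       /*
+        * The old CPU_ONLINE case ran this via smp_call_function_single(),
+        * i.e. with interrupts disabled. cpuhp online callbacks run in the
+        * hotplug thread on @cpu with interrupts enabled, so recreate the
+        * context the timer-start helper expects.
+        */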
+       local_irq_disable();
+       __oprofile_hrtimer_start(NULL);
+       local_irq_enable();
+       return 0;
 }
 
-static struct notifier_block __refdata oprofile_cpu_notifier = {
-       .notifier_call = oprofile_cpu_notify,
-};
+static int oprofile_timer_prep_down(unsigned int cpu)
+{
+       __oprofile_hrtimer_stop(cpu);
+       return 0;
+}
+
+static enum cpuhp_state hp_online;
 
 static int oprofile_hrtimer_setup(void)
 {
-       return register_hotcpu_notifier(&oprofile_cpu_notifier);
+       int ret;
+
+       ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                       "oprofile/timer:online",
+                                       oprofile_timer_online,
+                                       oprofile_timer_prep_down);
+       if (ret < 0)
+               return ret;
+       hp_online = ret;
+       return 0;
 }
 
 static void oprofile_hrtimer_shutdown(void)
 {
-       unregister_hotcpu_notifier(&oprofile_cpu_notifier);
+       cpuhp_remove_state_nocalls(hp_online);
 }
 
 int oprofile_timer_init(struct oprofile_operations *ops)
index c494613..b2f742f 100644 (file)
@@ -688,28 +688,20 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
        return 0;
 }
 
-static DEFINE_SPINLOCK(arm_pmu_lock);
-static LIST_HEAD(arm_pmu_list);
-
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int arm_perf_starting_cpu(unsigned int cpu)
+static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
 {
-       struct arm_pmu *pmu;
-
-       spin_lock(&arm_pmu_lock);
-       list_for_each_entry(pmu, &arm_pmu_list, entry) {
+       struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
 
-               if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-                       continue;
-               if (pmu->reset)
-                       pmu->reset(pmu);
-       }
-       spin_unlock(&arm_pmu_lock);
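+       /*
+        * STARTING state: runs on the hotplugged CPU itself, before
+        * interrupts are enabled, so it must not sleep. The multi-instance
+        * core now walks the registered PMUs, which is what made the
+        * driver-private list and lock above redundant.
+        */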
+       if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+               return 0;
+       if (pmu->reset)
+               pmu->reset(pmu);
        return 0;
 }
 
@@ -821,9 +813,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
        if (!cpu_hw_events)
                return -ENOMEM;
 
-       spin_lock(&arm_pmu_lock);
-       list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-       spin_unlock(&arm_pmu_lock);
+       err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+                                              &cpu_pmu->node);
+       if (err)
+               goto out_free;
 
        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
@@ -859,9 +852,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
        return 0;
 
 out_unregister:
-       spin_lock(&arm_pmu_lock);
-       list_del(&cpu_pmu->entry);
-       spin_unlock(&arm_pmu_lock);
+       cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+                                           &cpu_pmu->node);
+out_free:
        free_percpu(cpu_hw_events);
        return err;
 }
@@ -869,9 +862,8 @@ out_unregister:
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
        cpu_pm_pmu_unregister(cpu_pmu);
-       spin_lock(&arm_pmu_lock);
-       list_del(&cpu_pmu->entry);
-       spin_unlock(&arm_pmu_lock);
+       cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+                                           &cpu_pmu->node);
        free_percpu(cpu_pmu->hw_events);
 }
 
@@ -1068,9 +1060,9 @@ static int arm_pmu_hp_init(void)
 {
        int ret;
 
-       ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
-                                       "AP_PERF_ARM_STARTING",
-                                       arm_perf_starting_cpu, NULL);
+       ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
+                                     "AP_PERF_ARM_STARTING",
+                                     arm_perf_starting_cpu, NULL);
        if (ret)
                pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
                       ret);
index 7dbbb29..deefab3 100644 (file)
@@ -107,8 +107,8 @@ struct virtio_scsi {
        /* If the affinity hint is set for virtqueues */
        bool affinity_hint_set;
 
-       /* CPU hotplug notifier */
-       struct notifier_block nb;
+       struct hlist_node node;
+       struct hlist_node node_dead;
 
        /* Protected by event_vq lock */
        bool stop_events;
@@ -118,6 +118,7 @@ struct virtio_scsi {
        struct virtio_scsi_vq req_vqs[];
 };
 
+static enum cpuhp_state virtioscsi_online;
 static struct kmem_cache *virtscsi_cmd_cache;
 static mempool_t *virtscsi_cmd_pool;
 
@@ -852,21 +853,33 @@ static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
        put_online_cpus();
 }
 
-static int virtscsi_cpu_callback(struct notifier_block *nfb,
-                                unsigned long action, void *hcpu)
+static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
-       struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
-       switch(action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               __virtscsi_set_affinity(vscsi, true);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
+       struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi,
+                                                    node);
+       __virtscsi_set_affinity(vscsi, true);
+       return 0;
+}
+
+static int virtscsi_cpu_notif_add(struct virtio_scsi *vi)
+{
+       int ret;
+
+       ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node);
+       if (ret)
+               return ret;
+
+       ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead);
+       if (ret)
+               cpuhp_state_remove_instance(virtioscsi_online, &vi->node);
+       return ret;
+}
+
+static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi)
+{
+       cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node);
+       cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD,
+                                           &vi->node_dead);
 }
 
 static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
@@ -929,8 +942,6 @@ static int virtscsi_init(struct virtio_device *vdev,
                virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
                                 vqs[i]);
 
-       virtscsi_set_affinity(vscsi, true);
-
        virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
        virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
 
@@ -987,12 +998,9 @@ static int virtscsi_probe(struct virtio_device *vdev)
        if (err)
                goto virtscsi_init_failed;
 
-       vscsi->nb.notifier_call = &virtscsi_cpu_callback;
-       err = register_hotcpu_notifier(&vscsi->nb);
-       if (err) {
-               pr_err("registering cpu notifier failed\n");
+       err = virtscsi_cpu_notif_add(vscsi);
+       if (err)
                goto scsi_add_host_failed;
-       }
 
        cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
        shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
@@ -1049,7 +1057,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
 
        scsi_remove_host(shost);
 
-       unregister_hotcpu_notifier(&vscsi->nb);
+       virtscsi_cpu_notif_remove(vscsi);
 
        virtscsi_remove_vqs(vdev);
        scsi_host_put(shost);
@@ -1061,7 +1069,7 @@ static int virtscsi_freeze(struct virtio_device *vdev)
        struct Scsi_Host *sh = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);
 
-       unregister_hotcpu_notifier(&vscsi->nb);
+       virtscsi_cpu_notif_remove(vscsi);
        virtscsi_remove_vqs(vdev);
        return 0;
 }
@@ -1076,12 +1084,11 @@ static int virtscsi_restore(struct virtio_device *vdev)
        if (err)
                return err;
 
-       err = register_hotcpu_notifier(&vscsi->nb);
+       err = virtscsi_cpu_notif_add(vscsi);
        if (err) {
                vdev->config->del_vqs(vdev);
                return err;
        }
-
        virtio_device_ready(vdev);
 
        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
@@ -1136,6 +1143,16 @@ static int __init init(void)
                pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
                goto error;
        }
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+                                     "scsi/virtio:online",
+                                     virtscsi_cpu_online, NULL);
+       if (ret < 0)
+               goto error;
+       virtioscsi_online = ret;
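+       /*
+        * The old notifier handled CPU_ONLINE and CPU_DEAD identically, so
+        * the online callback doubles as the dead-state teardown below.
+        */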
+       ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead",
+                                     NULL, virtscsi_cpu_online);
+       if (ret)
+               goto error;
        ret = register_virtio_driver(&virtio_scsi_driver);
        if (ret < 0)
                goto error;
@@ -1151,12 +1168,17 @@ error:
                kmem_cache_destroy(virtscsi_cmd_cache);
                virtscsi_cmd_cache = NULL;
        }
+       if (virtioscsi_online)
+               cpuhp_remove_multi_state(virtioscsi_online);
+       cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
        return ret;
 }
 
 static void __exit fini(void)
 {
        unregister_virtio_driver(&virtio_scsi_driver);
+       cpuhp_remove_multi_state(virtioscsi_online);
+       cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
        mempool_destroy(virtscsi_cmd_pool);
        kmem_cache_destroy(virtscsi_cmd_cache);
 }
index bfe6b2e..f3db11c 100644 (file)
@@ -359,7 +359,7 @@ extern int acpi_processor_set_throttling(struct acpi_processor *pr,
  * onlined/offlined. In such case the flags.throttling will be updated.
  */
 extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
-                       unsigned long action);
+                       bool is_dead);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 #else
@@ -380,7 +380,7 @@ static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
 }
 
 static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
-                       unsigned long action) {}
+                       bool is_dead) {}
 
 static inline void acpi_processor_throttling_init(void) {}
 #endif /* CONFIG_ACPI_CPU_FREQ_PSS */
index 797d9c8..6bf1992 100644 (file)
@@ -61,17 +61,8 @@ struct notifier_block;
 #define CPU_DOWN_PREPARE       0x0005 /* CPU (unsigned)v going down */
 #define CPU_DOWN_FAILED                0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD               0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING              0x0008 /* CPU (unsigned)v not running any task,
-                                       * not handling interrupts, soon dead.
-                                       * Called on the dying cpu, interrupts
-                                       * are already disabled. Must not
-                                       * sleep, must not fail */
 #define CPU_POST_DEAD          0x0009 /* CPU (unsigned)v dead, cpu_hotplug
                                        * lock is dropped */
-#define CPU_STARTING           0x000A /* CPU (unsigned)v soon running.
-                                       * Called on the new cpu, just before
-                                       * enabling interrupts. Must not sleep,
-                                       * must not fail */
 #define CPU_BROKEN             0x000B /* CPU (unsigned)v did not die properly,
                                        * perhaps due to preemption. */
 
@@ -86,9 +77,6 @@ struct notifier_block;
 #define CPU_DOWN_PREPARE_FROZEN        (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
 #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
 #define CPU_DEAD_FROZEN                (CPU_DEAD | CPU_TASKS_FROZEN)
-#define CPU_DYING_FROZEN       (CPU_DYING | CPU_TASKS_FROZEN)
-#define CPU_STARTING_FROZEN    (CPU_STARTING | CPU_TASKS_FROZEN)
-
 
 #ifdef CONFIG_SMP
 extern bool cpuhp_tasks_frozen;
index 2ac07d0..510205e 100644 (file)
@@ -14,6 +14,19 @@ enum cpuhp_state {
        CPUHP_PERF_SUPERH,
        CPUHP_X86_HPET_DEAD,
        CPUHP_X86_APB_DEAD,
+       CPUHP_VIRT_NET_DEAD,
+       CPUHP_SLUB_DEAD,
+       CPUHP_MM_WRITEBACK_DEAD,
+       CPUHP_SOFTIRQ_DEAD,
+       CPUHP_NET_MVNETA_DEAD,
+       CPUHP_CPUIDLE_DEAD,
+       CPUHP_ARM64_FPSIMD_DEAD,
+       CPUHP_ARM_OMAP_WAKE_DEAD,
+       CPUHP_IRQ_POLL_DEAD,
+       CPUHP_BLOCK_SOFTIRQ_DEAD,
+       CPUHP_VIRT_SCSI_DEAD,
+       CPUHP_ACPI_CPUDRV_DEAD,
+       CPUHP_S390_PFAULT_DEAD,
        CPUHP_BLK_MQ_DEAD,
        CPUHP_WORKQUEUE_PREP,
        CPUHP_POWER_NUMA_PREPARE,
@@ -21,10 +34,20 @@ enum cpuhp_state {
        CPUHP_PROFILE_PREPARE,
        CPUHP_X2APIC_PREPARE,
        CPUHP_SMPCFD_PREPARE,
+       CPUHP_RELAY_PREPARE,
+       CPUHP_SLAB_PREPARE,
+       CPUHP_MD_RAID5_PREPARE,
        CPUHP_RCUTREE_PREP,
+       CPUHP_CPUIDLE_COUPLED_PREPARE,
+       CPUHP_POWERPC_PMAC_PREPARE,
+       CPUHP_POWERPC_MMU_CTX_PREPARE,
        CPUHP_NOTIFY_PREPARE,
+       CPUHP_ARM_SHMOBILE_SCU_PREPARE,
+       CPUHP_SH_SH3X_PREPARE,
        CPUHP_BLK_MQ_PREPARE,
        CPUHP_TIMERS_DEAD,
+       CPUHP_NOTF_ERR_INJ_PREPARE,
+       CPUHP_MIPS_SOC_PREPARE,
        CPUHP_BRINGUP_CPU,
        CPUHP_AP_IDLE_DEAD,
        CPUHP_AP_OFFLINE,
@@ -70,7 +93,6 @@ enum cpuhp_state {
        CPUHP_AP_ARM64_ISNDEP_STARTING,
        CPUHP_AP_SMPCFD_DYING,
        CPUHP_AP_X86_TBOOT_DYING,
-       CPUHP_AP_NOTIFY_STARTING,
        CPUHP_AP_ONLINE,
        CPUHP_TEARDOWN_CPU,
        CPUHP_AP_ONLINE_IDLE,
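
A note on this table: an entry's position defines its invocation order. States
run in ascending enum order on bringup and descending order on teardown, which
is why the new *_DEAD entries sit in the prepare section (their teardown runs
on the control CPU after the dying CPU is gone) while the AP entries execute
on the hotplugged CPU itself. A fixed slot is only needed when ordering
against other states matters; anything else can use the dynamic range, e.g.
(foo_* callbacks hypothetical):

    static enum cpuhp_state foo_state;

    static int __init foo_init(void)
    {
            /* no ordering constraints: let the core pick a dynamic slot */
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
                                        foo_online, foo_offline);
            if (ret < 0)
                    return ret;
            foo_state = ret;
            return 0;
    }
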
index 113ee62..0f9e567 100644 (file)
@@ -151,7 +151,7 @@ struct parallel_data {
  * @flags: padata flags.
  */
 struct padata_instance {
-       struct notifier_block            cpu_notifier;
+       struct hlist_node                node;
        struct workqueue_struct         *wq;
        struct parallel_data            *pd;
        struct padata_cpumask           cpumask;
index e188438..4ad1b40 100644 (file)
@@ -109,7 +109,7 @@ struct arm_pmu {
        DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
        struct platform_device  *plat_device;
        struct pmu_hw_events    __percpu *hw_events;
-       struct list_head        entry;
+       struct hlist_node       node;
        struct notifier_block   cpu_pm_nb;
 };
 
index d7c8359..ecbb34a 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/kref.h>
+#include <linux/percpu.h>
 
 /*
  * Tracks changes to rchan/rchan_buf structs
@@ -63,7 +64,7 @@ struct rchan
        struct kref kref;               /* channel refcount */
        void *private_data;             /* for user-defined data */
        size_t last_toobig;             /* tried to log event > subbuf size */
-       struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
+       struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
        int is_global;                  /* One global buffer ? */
        struct list_head list;          /* for channel list */
        struct dentry *parent;          /* parent dentry passed to open */
@@ -204,7 +205,7 @@ static inline void relay_write(struct rchan *chan,
        struct rchan_buf *buf;
 
        local_irq_save(flags);
-       buf = chan->buf[smp_processor_id()];
+       buf = *this_cpu_ptr(chan->buf);
        if (unlikely(buf->offset + length > chan->subbuf_size))
                length = relay_switch_subbuf(buf, length);
        memcpy(buf->data + buf->offset, data, length);
@@ -230,12 +231,12 @@ static inline void __relay_write(struct rchan *chan,
 {
        struct rchan_buf *buf;
 
-       buf = chan->buf[get_cpu()];
+       buf = *get_cpu_ptr(chan->buf);
        if (unlikely(buf->offset + length > buf->chan->subbuf_size))
                length = relay_switch_subbuf(buf, length);
        memcpy(buf->data + buf->offset, data, length);
        buf->offset += length;
-       put_cpu();
+       put_cpu_ptr(chan->buf);
 }
 
 /**
@@ -251,17 +252,19 @@ static inline void __relay_write(struct rchan *chan,
  */
 static inline void *relay_reserve(struct rchan *chan, size_t length)
 {
-       void *reserved;
-       struct rchan_buf *buf = chan->buf[smp_processor_id()];
+       void *reserved = NULL;
+       struct rchan_buf *buf = *get_cpu_ptr(chan->buf);
 
        if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
                length = relay_switch_subbuf(buf, length);
                if (!length)
-                       return NULL;
+                       goto end;
        }
        reserved = buf->data + buf->offset;
        buf->offset += length;
 
+end:
+       put_cpu_ptr(chan->buf);
        return reserved;
 }
 
@@ -285,5 +288,11 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf,
  */
 extern const struct file_operations relay_file_operations;
 
+#ifdef CONFIG_RELAY
+int relay_prepare_cpu(unsigned int cpu);
+#else
+#define relay_prepare_cpu     NULL
+#endif
+
 #endif /* _LINUX_RELAY_H */
 
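
The rchan buffer array becomes a true per-cpu pointer, sized by the possible
CPUs instead of NR_CPUS. A minimal sketch of the allocation and the two access
idioms used above, treating struct rchan_buf as opaque; as in relay_write(),
callers of the this_cpu variant must keep preemption disabled:

    #include <linux/errno.h>
    #include <linux/percpu.h>

    struct rchan_buf;                       /* opaque here */

    static struct rchan_buf * __percpu *bufs;

    static int example_alloc(void)
    {
            bufs = alloc_percpu(struct rchan_buf *); /* one slot per CPU */
            return bufs ? 0 : -ENOMEM;
    }

    static struct rchan_buf *buf_of(unsigned int cpu)
    {
            return *per_cpu_ptr(bufs, cpu); /* slot of a specific CPU */
    }

    static struct rchan_buf *buf_local(void)
    {
            return *this_cpu_ptr(bufs);     /* slot of the current CPU */
    }
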
index 4293808..084b12b 100644 (file)
@@ -650,4 +650,12 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 unsigned int kmem_cache_size(struct kmem_cache *s);
 void __init kmem_cache_init_late(void);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
+int slab_prepare_cpu(unsigned int cpu);
+int slab_dead_cpu(unsigned int cpu);
+#else
+#define slab_prepare_cpu       NULL
+#define slab_dead_cpu          NULL
+#endif
+
 #endif /* _LINUX_SLAB_H */
index c506485..7c78387 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/tick.h>
 #include <linux/irq.h>
 #include <linux/smpboot.h>
+#include <linux/relay.h>
+#include <linux/slab.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -73,15 +75,15 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
 struct cpuhp_step {
        const char              *name;
        union {
-               int             (*startup)(unsigned int cpu);
-               int             (*startup_multi)(unsigned int cpu,
-                                                struct hlist_node *node);
-       };
+               int             (*single)(unsigned int cpu);
+               int             (*multi)(unsigned int cpu,
+                                        struct hlist_node *node);
+       } startup;
        union {
-               int             (*teardown)(unsigned int cpu);
-               int             (*teardown_multi)(unsigned int cpu,
-                                                 struct hlist_node *node);
-       };
+               int             (*single)(unsigned int cpu);
+               int             (*multi)(unsigned int cpu,
+                                        struct hlist_node *node);
+       } teardown;
        struct hlist_head       list;
        bool                    skip_onerr;
        bool                    cant_stop;
@@ -127,7 +129,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
        int ret, cnt;
 
        if (!step->multi_instance) {
-               cb = bringup ? step->startup : step->teardown;
+               cb = bringup ? step->startup.single : step->teardown.single;
                if (!cb)
                        return 0;
                trace_cpuhp_enter(cpu, st->target, state, cb);
@@ -135,7 +137,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
                trace_cpuhp_exit(cpu, st->state, state, ret);
                return ret;
        }
-       cbm = bringup ? step->startup_multi : step->teardown_multi;
+       cbm = bringup ? step->startup.multi : step->teardown.multi;
        if (!cbm)
                return 0;
 
@@ -160,7 +162,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
        return 0;
 err:
        /* Rollback the instances if one failed */
-       cbm = !bringup ? step->startup_multi : step->teardown_multi;
+       cbm = !bringup ? step->startup.multi : step->teardown.multi;
        if (!cbm)
                return ret;
 
@@ -331,10 +333,17 @@ void cpu_hotplug_disable(void)
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 
+static void __cpu_hotplug_enable(void)
+{
+       if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
+               return;
+       cpu_hotplug_disabled--;
+}
+
 void cpu_hotplug_enable(void)
 {
        cpu_maps_update_begin();
-       WARN_ON(--cpu_hotplug_disabled < 0);
+       __cpu_hotplug_enable();
        cpu_maps_update_done();
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
@@ -401,12 +410,6 @@ static int notify_online(unsigned int cpu)
        return 0;
 }
 
-static int notify_starting(unsigned int cpu)
-{
-       cpu_notify(CPU_STARTING, cpu);
-       return 0;
-}
-
 static int bringup_wait_for_ap(unsigned int cpu)
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -752,12 +755,6 @@ static int notify_down_prepare(unsigned int cpu)
        return err;
 }
 
-static int notify_dying(unsigned int cpu)
-{
-       cpu_notify(CPU_DYING, cpu);
-       return 0;
-}
-
 /* Take this CPU down. */
 static int take_cpu_down(void *_param)
 {
@@ -816,7 +813,7 @@ static int takedown_cpu(unsigned int cpu)
        BUG_ON(cpu_online(cpu));
 
        /*
-        * The migration_call() CPU_DYING callback will have removed all
+        * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
@@ -869,7 +866,6 @@ void cpuhp_report_idle_dead(void)
 #define notify_down_prepare    NULL
 #define takedown_cpu           NULL
 #define notify_dead            NULL
-#define notify_dying           NULL
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -959,10 +955,9 @@ EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /**
- * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
  * @cpu: cpu that just started
  *
- * This function calls the cpu_chain notifiers with CPU_STARTING.
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
@@ -1160,7 +1155,7 @@ void enable_nonboot_cpus(void)
 
        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
-       WARN_ON(--cpu_hotplug_disabled < 0);
+       __cpu_hotplug_enable();
        if (cpumask_empty(frozen_cpus))
                goto out;
 
@@ -1249,40 +1244,50 @@ core_initcall(cpu_hotplug_pm_sync_init);
 static struct cpuhp_step cpuhp_bp_states[] = {
        [CPUHP_OFFLINE] = {
                .name                   = "offline",
-               .startup                = NULL,
-               .teardown               = NULL,
+               .startup.single         = NULL,
+               .teardown.single        = NULL,
        },
 #ifdef CONFIG_SMP
        [CPUHP_CREATE_THREADS] = {
-               .name                   = "threads:create",
-               .startup                = smpboot_create_threads,
-               .teardown               = NULL,
+               .name                   = "threads:prepare",
+               .startup.single         = smpboot_create_threads,
+               .teardown.single        = NULL,
                .cant_stop              = true,
        },
        [CPUHP_PERF_PREPARE] = {
-               .name = "perf prepare",
-               .startup = perf_event_init_cpu,
-               .teardown = perf_event_exit_cpu,
+               .name                   = "perf:prepare",
+               .startup.single         = perf_event_init_cpu,
+               .teardown.single        = perf_event_exit_cpu,
        },
        [CPUHP_WORKQUEUE_PREP] = {
-               .name = "workqueue prepare",
-               .startup = workqueue_prepare_cpu,
-               .teardown = NULL,
+               .name                   = "workqueue:prepare",
+               .startup.single         = workqueue_prepare_cpu,
+               .teardown.single        = NULL,
        },
        [CPUHP_HRTIMERS_PREPARE] = {
-               .name = "hrtimers prepare",
-               .startup = hrtimers_prepare_cpu,
-               .teardown = hrtimers_dead_cpu,
+               .name                   = "hrtimers:prepare",
+               .startup.single         = hrtimers_prepare_cpu,
+               .teardown.single        = hrtimers_dead_cpu,
        },
        [CPUHP_SMPCFD_PREPARE] = {
-               .name = "SMPCFD prepare",
-               .startup = smpcfd_prepare_cpu,
-               .teardown = smpcfd_dead_cpu,
+               .name                   = "smpcfd:prepare",
+               .startup.single         = smpcfd_prepare_cpu,
+               .teardown.single        = smpcfd_dead_cpu,
+       },
+       [CPUHP_RELAY_PREPARE] = {
+               .name                   = "relay:prepare",
+               .startup.single         = relay_prepare_cpu,
+               .teardown.single        = NULL,
+       },
+       [CPUHP_SLAB_PREPARE] = {
+               .name                   = "slab:prepare",
+               .startup.single         = slab_prepare_cpu,
+               .teardown.single        = slab_dead_cpu,
        },
        [CPUHP_RCUTREE_PREP] = {
-               .name = "RCU-tree prepare",
-               .startup = rcutree_prepare_cpu,
-               .teardown = rcutree_dead_cpu,
+               .name                   = "RCU/tree:prepare",
+               .startup.single         = rcutree_prepare_cpu,
+               .teardown.single        = rcutree_dead_cpu,
        },
        /*
         * Preparatory and dead notifiers. Will be replaced once the notifiers
@@ -1290,8 +1295,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
         */
        [CPUHP_NOTIFY_PREPARE] = {
                .name                   = "notify:prepare",
-               .startup                = notify_prepare,
-               .teardown               = notify_dead,
+               .startup.single         = notify_prepare,
+               .teardown.single        = notify_dead,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
@@ -1301,20 +1306,21 @@ static struct cpuhp_step cpuhp_bp_states[] = {
         * otherwise a RCU stall occurs.
         */
        [CPUHP_TIMERS_DEAD] = {
-               .name = "timers dead",
-               .startup = NULL,
-               .teardown = timers_dead_cpu,
+               .name                   = "timers:dead",
+               .startup.single         = NULL,
+               .teardown.single        = timers_dead_cpu,
        },
        /* Kicks the plugged cpu into life */
        [CPUHP_BRINGUP_CPU] = {
                .name                   = "cpu:bringup",
-               .startup                = bringup_cpu,
-               .teardown               = NULL,
+               .startup.single         = bringup_cpu,
+               .teardown.single        = NULL,
                .cant_stop              = true,
        },
        [CPUHP_AP_SMPCFD_DYING] = {
-               .startup = NULL,
-               .teardown = smpcfd_dying_cpu,
+               .name                   = "smpcfd:dying",
+               .startup.single         = NULL,
+               .teardown.single        = smpcfd_dying_cpu,
        },
        /*
         * Handled on the control processor until the plugged processor manages
@@ -1322,8 +1328,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
         */
        [CPUHP_TEARDOWN_CPU] = {
                .name                   = "cpu:teardown",
-               .startup                = NULL,
-               .teardown               = takedown_cpu,
+               .startup.single         = NULL,
+               .teardown.single        = takedown_cpu,
                .cant_stop              = true,
        },
 #else
@@ -1349,24 +1355,13 @@ static struct cpuhp_step cpuhp_ap_states[] = {
        /* First state is scheduler control. Interrupts are disabled */
        [CPUHP_AP_SCHED_STARTING] = {
                .name                   = "sched:starting",
-               .startup                = sched_cpu_starting,
-               .teardown               = sched_cpu_dying,
+               .startup.single         = sched_cpu_starting,
+               .teardown.single        = sched_cpu_dying,
        },
        [CPUHP_AP_RCUTREE_DYING] = {
-               .startup = NULL,
-               .teardown = rcutree_dying_cpu,
-       },
-       /*
-        * Low level startup/teardown notifiers. Run with interrupts
-        * disabled. Will be removed once the notifiers are converted to
-        * states.
-        */
-       [CPUHP_AP_NOTIFY_STARTING] = {
-               .name                   = "notify:starting",
-               .startup                = notify_starting,
-               .teardown               = notify_dying,
-               .skip_onerr             = true,
-               .cant_stop              = true,
+               .name                   = "RCU/tree:dying",
+               .startup.single         = NULL,
+               .teardown.single        = rcutree_dying_cpu,
        },
        /* Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronization */
@@ -1375,24 +1370,24 @@ static struct cpuhp_step cpuhp_ap_states[] = {
        },
        /* Handle smpboot threads park/unpark */
        [CPUHP_AP_SMPBOOT_THREADS] = {
-               .name                   = "smpboot:threads",
-               .startup                = smpboot_unpark_threads,
-               .teardown               = NULL,
+               .name                   = "smpboot/threads:online",
+               .startup.single         = smpboot_unpark_threads,
+               .teardown.single        = NULL,
        },
        [CPUHP_AP_PERF_ONLINE] = {
-               .name = "perf online",
-               .startup = perf_event_init_cpu,
-               .teardown = perf_event_exit_cpu,
+               .name                   = "perf:online",
+               .startup.single         = perf_event_init_cpu,
+               .teardown.single        = perf_event_exit_cpu,
        },
        [CPUHP_AP_WORKQUEUE_ONLINE] = {
-               .name = "workqueue online",
-               .startup = workqueue_online_cpu,
-               .teardown = workqueue_offline_cpu,
+               .name                   = "workqueue:online",
+               .startup.single         = workqueue_online_cpu,
+               .teardown.single        = workqueue_offline_cpu,
        },
        [CPUHP_AP_RCUTREE_ONLINE] = {
-               .name = "RCU-tree online",
-               .startup = rcutree_online_cpu,
-               .teardown = rcutree_offline_cpu,
+               .name                   = "RCU/tree:online",
+               .startup.single         = rcutree_online_cpu,
+               .teardown.single        = rcutree_offline_cpu,
        },
 
        /*
@@ -1401,8 +1396,8 @@ static struct cpuhp_step cpuhp_ap_states[] = {
         */
        [CPUHP_AP_NOTIFY_ONLINE] = {
                .name                   = "notify:online",
-               .startup                = notify_online,
-               .teardown               = notify_down_prepare,
+               .startup.single         = notify_online,
+               .teardown.single        = notify_down_prepare,
                .skip_onerr             = true,
        },
 #endif
@@ -1414,16 +1409,16 @@ static struct cpuhp_step cpuhp_ap_states[] = {
        /* Last state is scheduler control setting the cpu active */
        [CPUHP_AP_ACTIVE] = {
                .name                   = "sched:active",
-               .startup                = sched_cpu_activate,
-               .teardown               = sched_cpu_deactivate,
+               .startup.single         = sched_cpu_activate,
+               .teardown.single        = sched_cpu_deactivate,
        },
 #endif
 
        /* CPU is fully up and running. */
        [CPUHP_ONLINE] = {
                .name                   = "online",
-               .startup                = NULL,
-               .teardown               = NULL,
+               .startup.single         = NULL,
+               .teardown.single        = NULL,
        },
 };
 
@@ -1446,8 +1441,8 @@ static void cpuhp_store_callbacks(enum cpuhp_state state,
 
        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(state);
-       sp->startup = startup;
-       sp->teardown = teardown;
+       sp->startup.single = startup;
+       sp->teardown.single = teardown;
        sp->name = name;
        sp->multi_instance = multi_instance;
        INIT_HLIST_HEAD(&sp->list);
@@ -1456,7 +1451,7 @@ static void cpuhp_store_callbacks(enum cpuhp_state state,
 
 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
 {
-       return cpuhp_get_step(state)->teardown;
+       return cpuhp_get_step(state)->teardown.single;
 }
 
 /*
@@ -1469,7 +1464,8 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
        struct cpuhp_step *sp = cpuhp_get_step(state);
        int ret;
 
-       if ((bringup && !sp->startup) || (!bringup && !sp->teardown))
+       if ((bringup && !sp->startup.single) ||
+           (!bringup && !sp->teardown.single))
                return 0;
        /*
         * The non AP bound callbacks can fail on bringup. On teardown
@@ -1547,7 +1543,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
 
        get_online_cpus();
 
-       if (!invoke || !sp->startup_multi)
+       if (!invoke || !sp->startup.multi)
                goto add_node;
 
        /*
@@ -1563,7 +1559,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
 
                ret = cpuhp_issue_call(cpu, state, true, node);
                if (ret) {
-                       if (sp->teardown_multi)
+                       if (sp->teardown.multi)
                                cpuhp_rollback_install(cpu, state, node);
                        goto err;
                }
index 9932788..7848f05 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #define MAX_OBJ_NUM 1000
 
@@ -769,52 +770,43 @@ static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
                cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
 }
 
-
-static int padata_cpu_callback(struct notifier_block *nfb,
-                              unsigned long action, void *hcpu)
+static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
-       int err;
        struct padata_instance *pinst;
-       int cpu = (unsigned long)hcpu;
+       int ret;
 
-       pinst = container_of(nfb, struct padata_instance, cpu_notifier);
+       pinst = hlist_entry_safe(node, struct padata_instance, node);
+       if (!pinst_has_cpu(pinst, cpu))
+               return 0;
 
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-               if (!pinst_has_cpu(pinst, cpu))
-                       break;
-               mutex_lock(&pinst->lock);
-               err = __padata_add_cpu(pinst, cpu);
-               mutex_unlock(&pinst->lock);
-               if (err)
-                       return notifier_from_errno(err);
-               break;
+       mutex_lock(&pinst->lock);
+       ret = __padata_add_cpu(pinst, cpu);
+       mutex_unlock(&pinst->lock);
+       return ret;
+}
 
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-               if (!pinst_has_cpu(pinst, cpu))
-                       break;
-               mutex_lock(&pinst->lock);
-               err = __padata_remove_cpu(pinst, cpu);
-               mutex_unlock(&pinst->lock);
-               if (err)
-                       return notifier_from_errno(err);
-               break;
-       }
+static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
+{
+       struct padata_instance *pinst;
+       int ret;
+
+       pinst = hlist_entry_safe(node, struct padata_instance, node);
+       if (!pinst_has_cpu(pinst, cpu))
+               return 0;
 
-       return NOTIFY_OK;
+       mutex_lock(&pinst->lock);
+       ret = __padata_remove_cpu(pinst, cpu);
+       mutex_unlock(&pinst->lock);
+       return ret;
 }
+
+static enum cpuhp_state hp_online;
 #endif
 
 static void __padata_free(struct padata_instance *pinst)
 {
 #ifdef CONFIG_HOTPLUG_CPU
-       unregister_hotcpu_notifier(&pinst->cpu_notifier);
+       cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
 #endif
 
        padata_stop(pinst);
@@ -1012,11 +1004,8 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
        mutex_init(&pinst->lock);
 
 #ifdef CONFIG_HOTPLUG_CPU
-       pinst->cpu_notifier.notifier_call = padata_cpu_callback;
-       pinst->cpu_notifier.priority = 0;
-       register_hotcpu_notifier(&pinst->cpu_notifier);
+       cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
 #endif
-
        return pinst;
 
 err_free_masks:
@@ -1039,3 +1028,26 @@ void padata_free(struct padata_instance *pinst)
        kobject_put(&pinst->kobj);
 }
 EXPORT_SYMBOL(padata_free);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static __init int padata_driver_init(void)
+{
+       int ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
+                                     padata_cpu_online,
+                                     padata_cpu_prep_down);
+       if (ret < 0)
+               return ret;
+       hp_online = ret;
+       return 0;
+}
+module_init(padata_driver_init);
+
+static __exit void padata_driver_exit(void)
+{
+       cpuhp_remove_multi_state(hp_online);
+}
+module_exit(padata_driver_exit);
+#endif
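
padata is converted to the multi-instance pattern: one hotplug state, many registered objects, each hooked in through an embedded hlist_node. A minimal sketch of that pattern under assumed names (struct foo, foo_*; not taken from the patch):

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>
    #include <linux/list.h>

    struct foo {
            struct hlist_node node;         /* links instance into the state */
    };

    static enum cpuhp_state foo_online;     /* dynamically allocated state id */

    static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
    {
            struct foo *f = hlist_entry_safe(node, struct foo, node);

            if (!f)
                    return 0;
            /* per-instance bring-up work for @cpu goes here */
            return 0;
    }

    static int __init foo_init(void)
    {
            int ret;

            ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
                                          foo_cpu_online, NULL);
            if (ret < 0)
                    return ret;
            foo_online = ret;       /* CPUHP_AP_ONLINE_DYN returns the slot */
            return 0;
    }

Each object then joins with cpuhp_state_add_instance_nocalls(foo_online, &f->node) and leaves with the matching remove call, exactly as padata_alloc() and __padata_free() do above.
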
index d797502..fc9b4a4 100644 (file)
@@ -214,7 +214,7 @@ static void relay_destroy_buf(struct rchan_buf *buf)
                        __free_page(buf->page_array[i]);
                relay_free_page_array(buf->page_array);
        }
-       chan->buf[buf->cpu] = NULL;
+       *per_cpu_ptr(chan->buf, buf->cpu) = NULL;
        kfree(buf->padding);
        kfree(buf);
        kref_put(&chan->kref, relay_destroy_channel);
@@ -382,20 +382,21 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  */
 void relay_reset(struct rchan *chan)
 {
+       struct rchan_buf *buf;
        unsigned int i;
 
        if (!chan)
                return;
 
-       if (chan->is_global && chan->buf[0]) {
-               __relay_reset(chan->buf[0], 0);
+       if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+               __relay_reset(buf, 0);
                return;
        }
 
        mutex_lock(&relay_channels_mutex);
        for_each_possible_cpu(i)
-               if (chan->buf[i])
-                       __relay_reset(chan->buf[i], 0);
+               if ((buf = *per_cpu_ptr(chan->buf, i)))
+                       __relay_reset(buf, 0);
        mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_reset);
@@ -440,7 +441,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
        struct dentry *dentry;
 
        if (chan->is_global)
-               return chan->buf[0];
+               return *per_cpu_ptr(chan->buf, 0);
 
        buf = relay_create_buf(chan);
        if (!buf)
@@ -464,7 +465,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
        __relay_reset(buf, 1);
 
        if(chan->is_global) {
-               chan->buf[0] = buf;
+               *per_cpu_ptr(chan->buf, 0) = buf;
                buf->cpu = 0;
        }
 
@@ -512,46 +513,25 @@ static void setup_callbacks(struct rchan *chan,
        chan->cb = cb;
 }
 
-/**
- *     relay_hotcpu_callback - CPU hotplug callback
- *     @nb: notifier block
- *     @action: hotplug action to take
- *     @hcpu: CPU number
- *
- *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static int relay_hotcpu_callback(struct notifier_block *nb,
-                               unsigned long action,
-                               void *hcpu)
+int relay_prepare_cpu(unsigned int cpu)
 {
-       unsigned int hotcpu = (unsigned long)hcpu;
        struct rchan *chan;
+       struct rchan_buf *buf;
 
-       switch(action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               mutex_lock(&relay_channels_mutex);
-               list_for_each_entry(chan, &relay_channels, list) {
-                       if (chan->buf[hotcpu])
-                               continue;
-                       chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-                       if(!chan->buf[hotcpu]) {
-                               printk(KERN_ERR
-                                       "relay_hotcpu_callback: cpu %d buffer "
-                                       "creation failed\n", hotcpu);
-                               mutex_unlock(&relay_channels_mutex);
-                               return notifier_from_errno(-ENOMEM);
-                       }
+       mutex_lock(&relay_channels_mutex);
+       list_for_each_entry(chan, &relay_channels, list) {
+               if ((buf = *per_cpu_ptr(chan->buf, cpu)))
+                       continue;
+               buf = relay_open_buf(chan, cpu);
+               if (!buf) {
+                       pr_err("relay: cpu %d buffer creation failed\n", cpu);
+                       mutex_unlock(&relay_channels_mutex);
+                       return -ENOMEM;
                }
-               mutex_unlock(&relay_channels_mutex);
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /* No need to flush the cpu : will be flushed upon
-                * final relay_flush() call. */
-               break;
+               *per_cpu_ptr(chan->buf, cpu) = buf;
        }
-       return NOTIFY_OK;
+       mutex_unlock(&relay_channels_mutex);
+       return 0;
 }
 
 /**
@@ -583,6 +563,7 @@ struct rchan *relay_open(const char *base_filename,
 {
        unsigned int i;
        struct rchan *chan;
+       struct rchan_buf *buf;
 
        if (!(subbuf_size && n_subbufs))
                return NULL;
@@ -593,6 +574,7 @@ struct rchan *relay_open(const char *base_filename,
        if (!chan)
                return NULL;
 
+       chan->buf = alloc_percpu(struct rchan_buf *);
        chan->version = RELAYFS_CHANNEL_VERSION;
        chan->n_subbufs = n_subbufs;
        chan->subbuf_size = subbuf_size;
@@ -608,9 +590,10 @@ struct rchan *relay_open(const char *base_filename,
 
        mutex_lock(&relay_channels_mutex);
        for_each_online_cpu(i) {
-               chan->buf[i] = relay_open_buf(chan, i);
-               if (!chan->buf[i])
+               buf = relay_open_buf(chan, i);
+               if (!buf)
                        goto free_bufs;
+               *per_cpu_ptr(chan->buf, i) = buf;
        }
        list_add(&chan->list, &relay_channels);
        mutex_unlock(&relay_channels_mutex);
@@ -619,8 +602,8 @@ struct rchan *relay_open(const char *base_filename,
 
 free_bufs:
        for_each_possible_cpu(i) {
-               if (chan->buf[i])
-                       relay_close_buf(chan->buf[i]);
+               if ((buf = *per_cpu_ptr(chan->buf, i)))
+                       relay_close_buf(buf);
        }
 
        kref_put(&chan->kref, relay_destroy_channel);
@@ -666,6 +649,7 @@ int relay_late_setup_files(struct rchan *chan,
        unsigned int i, curr_cpu;
        unsigned long flags;
        struct dentry *dentry;
+       struct rchan_buf *buf;
        struct rchan_percpu_buf_dispatcher disp;
 
        if (!chan || !base_filename)
@@ -684,10 +668,11 @@ int relay_late_setup_files(struct rchan *chan,
 
        if (chan->is_global) {
                err = -EINVAL;
-               if (!WARN_ON_ONCE(!chan->buf[0])) {
-                       dentry = relay_create_buf_file(chan, chan->buf[0], 0);
+               buf = *per_cpu_ptr(chan->buf, 0);
+               if (!WARN_ON_ONCE(!buf)) {
+                       dentry = relay_create_buf_file(chan, buf, 0);
                        if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
-                               relay_set_buf_dentry(chan->buf[0], dentry);
+                               relay_set_buf_dentry(buf, dentry);
                                err = 0;
                        }
                }
@@ -702,13 +687,14 @@ int relay_late_setup_files(struct rchan *chan,
         * on all currently online CPUs.
         */
        for_each_online_cpu(i) {
-               if (unlikely(!chan->buf[i])) {
+               buf = *per_cpu_ptr(chan->buf, i);
+               if (unlikely(!buf)) {
                        WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
                        err = -EINVAL;
                        break;
                }
 
-               dentry = relay_create_buf_file(chan, chan->buf[i], i);
+               dentry = relay_create_buf_file(chan, buf, i);
                if (unlikely(!dentry)) {
                        err = -EINVAL;
                        break;
@@ -716,10 +702,10 @@ int relay_late_setup_files(struct rchan *chan,
 
                if (curr_cpu == i) {
                        local_irq_save(flags);
-                       relay_set_buf_dentry(chan->buf[i], dentry);
+                       relay_set_buf_dentry(buf, dentry);
                        local_irq_restore(flags);
                } else {
-                       disp.buf = chan->buf[i];
+                       disp.buf = buf;
                        disp.dentry = dentry;
                        smp_mb();
                        /* relay_channels_mutex must be held, so wait. */
@@ -822,11 +808,10 @@ void relay_subbufs_consumed(struct rchan *chan,
        if (!chan)
                return;
 
-       if (cpu >= NR_CPUS || !chan->buf[cpu] ||
-                                       subbufs_consumed > chan->n_subbufs)
+       buf = *per_cpu_ptr(chan->buf, cpu);
+       if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
                return;
 
-       buf = chan->buf[cpu];
        if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
                buf->subbufs_consumed = buf->subbufs_produced;
        else
@@ -842,18 +827,19 @@ EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
  */
 void relay_close(struct rchan *chan)
 {
+       struct rchan_buf *buf;
        unsigned int i;
 
        if (!chan)
                return;
 
        mutex_lock(&relay_channels_mutex);
-       if (chan->is_global && chan->buf[0])
-               relay_close_buf(chan->buf[0]);
+       if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
+               relay_close_buf(buf);
        else
                for_each_possible_cpu(i)
-                       if (chan->buf[i])
-                               relay_close_buf(chan->buf[i]);
+                       if ((buf = *per_cpu_ptr(chan->buf, i)))
+                               relay_close_buf(buf);
 
        if (chan->last_toobig)
                printk(KERN_WARNING "relay: one or more items not logged "
@@ -874,20 +860,21 @@ EXPORT_SYMBOL_GPL(relay_close);
  */
 void relay_flush(struct rchan *chan)
 {
+       struct rchan_buf *buf;
        unsigned int i;
 
        if (!chan)
                return;
 
-       if (chan->is_global && chan->buf[0]) {
-               relay_switch_subbuf(chan->buf[0], 0);
+       if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+               relay_switch_subbuf(buf, 0);
                return;
        }
 
        mutex_lock(&relay_channels_mutex);
        for_each_possible_cpu(i)
-               if (chan->buf[i])
-                       relay_switch_subbuf(chan->buf[i], 0);
+               if ((buf = *per_cpu_ptr(chan->buf, i)))
+                       relay_switch_subbuf(buf, 0);
        mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_flush);
@@ -1377,12 +1364,3 @@ const struct file_operations relay_file_operations = {
        .splice_read    = relay_file_splice_read,
 };
 EXPORT_SYMBOL_GPL(relay_file_operations);
-
-static __init int relay_init(void)
-{
-
-       hotcpu_notifier(relay_hotcpu_callback, 0);
-       return 0;
-}
-
-early_initcall(relay_init);
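
relay_init() and its notifier can go away because relay_prepare_cpu() is presumably invoked from the core state table elsewhere in this series (a CPUHP_RELAY_PREPARE-style entry). The other change running through every hunk is the buffer storage: a NR_CPUS-indexed pointer array becomes a per-cpu pointer. A minimal sketch of that idiom, with illustrative names:

    #include <linux/percpu.h>

    struct item;                            /* stand-in for rchan_buf */
    static struct item * __percpu *items;   /* one struct item * per CPU */

    static int items_init(void)
    {
            items = alloc_percpu(struct item *);    /* replaces item *arr[NR_CPUS] */
            if (!items)
                    return -ENOMEM;
            return 0;
    }

    static void items_set(unsigned int cpu, struct item *it)
    {
            *per_cpu_ptr(items, cpu) = it;  /* was: arr[cpu] = it */
    }

Besides saving memory on small systems, the per-cpu form keeps each CPU's pointer on its own cache line, which is why the conversion touches every chan->buf access above.
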
index 17caf4b..c372114 100644 (file)
@@ -700,7 +700,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
        BUG();
 }
 
-static void takeover_tasklets(unsigned int cpu)
+static int takeover_tasklets(unsigned int cpu)
 {
        /* CPU is dead, so no lock needed. */
        local_irq_disable();
@@ -723,27 +723,12 @@ static void takeover_tasklets(unsigned int cpu)
        raise_softirq_irqoff(HI_SOFTIRQ);
 
        local_irq_enable();
+       return 0;
 }
+#else
+#define takeover_tasklets      NULL
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-                       void *hcpu)
-{
-       switch (action) {
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               takeover_tasklets((unsigned long)hcpu);
-               break;
-#endif /* CONFIG_HOTPLUG_CPU */
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_nfb = {
-       .notifier_call = cpu_callback
-};
-
 static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
@@ -753,8 +738,8 @@ static struct smp_hotplug_thread softirq_threads = {
 
 static __init int spawn_ksoftirqd(void)
 {
-       register_cpu_notifier(&cpu_nfb);
-
+       cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
+                                 takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
 
        return 0;
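
softirq registers a teardown-only state: nothing needs doing when a CPU comes up, so the startup callback is NULL, and on !CONFIG_HOTPLUG_CPU kernels takeover_tasklets itself is #defined to NULL above. A minimal sketch of that shape; CPUHP_EXAMPLE_DEAD stands in for a real entry that would have to be added to the cpuhp_state enum:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int example_cpu_dead(unsigned int cpu)
    {
            /* migrate the dead CPU's pending per-cpu work here */
            return 0;
    }

    static int __init example_init(void)
    {
            /* NULL startup: nothing to do when a CPU comes online */
            return cpuhp_setup_state_nocalls(CPUHP_EXAMPLE_DEAD,
                                             "example:dead", NULL,
                                             example_cpu_dead);
    }
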
index 707ca24..0e2c9a1 100644 (file)
@@ -8,16 +8,47 @@ static int priority;
 module_param(priority, int, 0);
 MODULE_PARM_DESC(priority, "specify cpu notifier priority");
 
+#define UP_PREPARE 0
+#define UP_PREPARE_FROZEN 0
+#define DOWN_PREPARE 0
+#define DOWN_PREPARE_FROZEN 0
+
 static struct notifier_err_inject cpu_notifier_err_inject = {
        .actions = {
-               { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE) },
-               { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE_FROZEN) },
-               { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE) },
-               { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE_FROZEN) },
+               { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE) },
+               { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE_FROZEN) },
+               { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE) },
+               { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE_FROZEN) },
                {}
        }
 };
 
+static int notf_err_handle(struct notifier_err_inject_action *action)
+{
+       int ret;
+
+       ret = action->error;
+       if (ret)
+               pr_info("Injecting error (%d) to %s\n", ret, action->name);
+       return ret;
+}
+
+static int notf_err_inj_up_prepare(unsigned int cpu)
+{
+       if (!cpuhp_tasks_frozen)
+               return notf_err_handle(&cpu_notifier_err_inject.actions[0]);
+       else
+               return notf_err_handle(&cpu_notifier_err_inject.actions[1]);
+}
+
+static int notf_err_inj_dead(unsigned int cpu)
+{
+       if (!cpuhp_tasks_frozen)
+               return notf_err_handle(&cpu_notifier_err_inject.actions[2]);
+       else
+               return notf_err_handle(&cpu_notifier_err_inject.actions[3]);
+}
+
 static struct dentry *dir;
 
 static int err_inject_init(void)
@@ -29,7 +60,10 @@ static int err_inject_init(void)
        if (IS_ERR(dir))
                return PTR_ERR(dir);
 
-       err = register_hotcpu_notifier(&cpu_notifier_err_inject.nb);
+       err = cpuhp_setup_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE,
+                                       "cpu-err-notif:prepare",
+                                       notf_err_inj_up_prepare,
+                                       notf_err_inj_dead);
        if (err)
                debugfs_remove_recursive(dir);
 
@@ -38,7 +72,7 @@ static int err_inject_init(void)
 
 static void err_inject_exit(void)
 {
-       unregister_hotcpu_notifier(&cpu_notifier_err_inject.nb);
+       cpuhp_remove_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE);
        debugfs_remove_recursive(dir);
 }
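
The error injector above shows how the old *_FROZEN action pairs are recovered: the state machine calls the same callback on both the normal and the suspend/resume path, and the global cpuhp_tasks_frozen flag distinguishes them. A minimal sketch of that test, names illustrative:

    static int example_prepare(unsigned int cpu)
    {
            if (cpuhp_tasks_frozen) {
                    /* suspend/resume path (old CPU_UP_PREPARE_FROZEN) */
                    return 0;
            }
            /* normal hot-add path (old CPU_UP_PREPARE) */
            return 0;
    }
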
 
index 836f7db..2be5569 100644 (file)
@@ -184,30 +184,21 @@ void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
 }
 EXPORT_SYMBOL(irq_poll_init);
 
-static int irq_poll_cpu_notify(struct notifier_block *self,
-                                unsigned long action, void *hcpu)
+static int irq_poll_cpu_dead(unsigned int cpu)
 {
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               int cpu = (unsigned long) hcpu;
-
-               local_irq_disable();
-               list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
-                                this_cpu_ptr(&blk_cpu_iopoll));
-               __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
-               local_irq_enable();
-       }
+       local_irq_disable();
+       list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
+                        this_cpu_ptr(&blk_cpu_iopoll));
+       __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+       local_irq_enable();
 
-       return NOTIFY_OK;
+       return 0;
 }
 
-static struct notifier_block irq_poll_cpu_notifier = {
-       .notifier_call  = irq_poll_cpu_notify,
-};
-
 static __init int irq_poll_setup(void)
 {
        int i;
@@ -216,7 +207,8 @@ static __init int irq_poll_setup(void)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
 
        open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
-       register_hotcpu_notifier(&irq_poll_cpu_notifier);
+       cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
+                                 irq_poll_cpu_dead);
        return 0;
 }
 subsys_initcall(irq_poll_setup);
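
The irq_poll teardown above is an instance of the common "splice the dead CPU's backlog onto the current CPU" idiom, also seen in takeover_tasklets() earlier. A minimal self-contained sketch with an illustrative per-cpu list:

    #include <linux/irqflags.h>
    #include <linux/list.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct list_head, example_pending);

    static int example_takeover(unsigned int dead_cpu)
    {
            local_irq_disable();
            /* move the dead CPU's backlog onto this CPU's list */
            list_splice_init(&per_cpu(example_pending, dead_cpu),
                             this_cpu_ptr(&example_pending));
            local_irq_enable();
            return 0;
    }

No lock is needed because the callback runs after the CPU is dead, so nobody else can touch its per-cpu list.
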
index f4cd7d8..28d6f36 100644 (file)
@@ -2080,26 +2080,12 @@ void writeback_set_ratelimit(void)
                ratelimit_pages = 16;
 }
 
-static int
-ratelimit_handler(struct notifier_block *self, unsigned long action,
-                 void *hcpu)
+static int page_writeback_cpu_online(unsigned int cpu)
 {
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_ONLINE:
-       case CPU_DEAD:
-               writeback_set_ratelimit();
-               return NOTIFY_OK;
-       default:
-               return NOTIFY_DONE;
-       }
+       writeback_set_ratelimit();
+       return 0;
 }
 
-static struct notifier_block ratelimit_nb = {
-       .notifier_call  = ratelimit_handler,
-       .next           = NULL,
-};
-
 /*
  * Called early on to tune the page writeback dirty limits.
  *
@@ -2122,8 +2108,10 @@ void __init page_writeback_init(void)
 {
        BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
 
-       writeback_set_ratelimit();
-       register_cpu_notifier(&ratelimit_nb);
+       cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
+                         page_writeback_cpu_online, NULL);
+       cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
+                         page_writeback_cpu_online);
 }
 
 /**
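
Note the use of plain cpuhp_setup_state() here rather than the _nocalls variant used elsewhere: the non-_nocalls form invokes the startup callback on every CPU that is already online, which is why the explicit writeback_set_ratelimit() call could be dropped from page_writeback_init(). A minimal sketch of the pattern, names illustrative:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int example_online(unsigned int cpu)
    {
            /* recompute a global that depends on num_online_cpus() */
            return 0;
    }

    static int __init example_init(void)
    {
            int ret;

            /* also runs example_online() on each already-online CPU */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                    example_online, NULL);
            return ret < 0 ? ret : 0;
    }
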
index b672710..090fb26 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -886,6 +886,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
        return 0;
 }
 
+#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
@@ -908,6 +909,7 @@ static int init_cache_node_node(int node)
 
        return 0;
 }
+#endif
 
 static int setup_kmem_cache_node(struct kmem_cache *cachep,
                                int node, gfp_t gfp, bool force_change)
@@ -975,6 +977,8 @@ fail:
        return ret;
 }
 
+#ifdef CONFIG_SMP
+
 static void cpuup_canceled(long cpu)
 {
        struct kmem_cache *cachep;
@@ -1075,65 +1079,54 @@ bad:
        return -ENOMEM;
 }
 
-static int cpuup_callback(struct notifier_block *nfb,
-                                   unsigned long action, void *hcpu)
+int slab_prepare_cpu(unsigned int cpu)
 {
-       long cpu = (long)hcpu;
-       int err = 0;
+       int err;
 
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               mutex_lock(&slab_mutex);
-               err = cpuup_prepare(cpu);
-               mutex_unlock(&slab_mutex);
-               break;
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               start_cpu_timer(cpu);
-               break;
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               /*
-                * Shutdown cache reaper. Note that the slab_mutex is
-                * held so that if cache_reap() is invoked it cannot do
-                * anything expensive but will only modify reap_work
-                * and reschedule the timer.
-               */
-               cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
-               /* Now the cache_reaper is guaranteed to be not running. */
-               per_cpu(slab_reap_work, cpu).work.func = NULL;
-               break;
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-               start_cpu_timer(cpu);
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /*
-                * Even if all the cpus of a node are down, we don't free the
-                * kmem_cache_node of any cache. This to avoid a race between
-                * cpu_down, and a kmalloc allocation from another cpu for
-                * memory from the node of the cpu going down.  The node
-                * structure is usually allocated from kmem_cache_create() and
-                * gets destroyed at kmem_cache_destroy().
-                */
-               /* fall through */
+       mutex_lock(&slab_mutex);
+       err = cpuup_prepare(cpu);
+       mutex_unlock(&slab_mutex);
+       return err;
+}
+
+/*
+ * This is called for a failed online attempt and for a successful
+ * offline.
+ *
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_cache_node of any cache. This is to avoid a race between
+ * cpu_down and a kmalloc allocation from another cpu for memory from
+ * the node of the cpu going down.  The kmem_cache_node structure is
+ * usually allocated from kmem_cache_create() and gets destroyed at
+ * kmem_cache_destroy().
+ */
+int slab_dead_cpu(unsigned int cpu)
+{
+       mutex_lock(&slab_mutex);
+       cpuup_canceled(cpu);
+       mutex_unlock(&slab_mutex);
+       return 0;
+}
 #endif
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-               mutex_lock(&slab_mutex);
-               cpuup_canceled(cpu);
-               mutex_unlock(&slab_mutex);
-               break;
-       }
-       return notifier_from_errno(err);
+
+static int slab_online_cpu(unsigned int cpu)
+{
+       start_cpu_timer(cpu);
+       return 0;
 }
 
-static struct notifier_block cpucache_notifier = {
-       &cpuup_callback, NULL, 0
-};
+static int slab_offline_cpu(unsigned int cpu)
+{
+       /*
+        * Shutdown cache reaper. Note that the slab_mutex is held so
+        * that if cache_reap() is invoked it cannot do anything
+        * expensive but will only modify reap_work and reschedule the
+        * timer.
+        */
+       cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
+       /* Now the cache_reaper is guaranteed to be not running. */
+       per_cpu(slab_reap_work, cpu).work.func = NULL;
+       return 0;
+}
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
 /*
@@ -1336,12 +1329,6 @@ void __init kmem_cache_init_late(void)
        /* Done! */
        slab_state = FULL;
 
-       /*
-        * Register a cpu startup notifier callback that initializes
-        * cpu_cache_get for all new cpus
-        */
-       register_cpu_notifier(&cpucache_notifier);
-
 #ifdef CONFIG_NUMA
        /*
         * Register a memory hotplug callback that initializes and frees
@@ -1358,13 +1345,14 @@ void __init kmem_cache_init_late(void)
 
 static int __init cpucache_init(void)
 {
-       int cpu;
+       int ret;
 
        /*
         * Register the timers that return unneeded pages to the page allocator
         */
-       for_each_online_cpu(cpu)
-               start_cpu_timer(cpu);
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
+                               slab_online_cpu, slab_offline_cpu);
+       WARN_ON(ret < 0);
 
        /* Done! */
        slab_state = FULL;
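
slab_prepare_cpu() and slab_dead_cpu() are left non-static because, rather than being registered at runtime, they are presumably wired into the core state table in kernel/cpu.c, in the same style as the sched:active entry at the top of this diff. A sketch of what such an entry would look like (state name and placement assumed):

        [CPUHP_SLAB_PREPARE] = {
                .name                   = "slab:prepare",
                .startup.single         = slab_prepare_cpu,
                .teardown.single        = slab_dead_cpu,
        },

Only the timer start/stop callbacks, which must run on the online side, stay in mm/slab.c as a dynamically allocated state.
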
index 9adae58..2b3e740 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -194,10 +194,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define __OBJECT_POISON                0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE       0x40000000UL /* Use cmpxchg_double */
 
-#ifdef CONFIG_SMP
-static struct notifier_block slab_notifier;
-#endif
-
 /*
  * Tracking user of a slab.
  */
@@ -2304,6 +2300,25 @@ static void flush_all(struct kmem_cache *s)
        on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
 }
 
+/*
+ * Use the cpu hotplug state machine to ensure that the cpu slabs are
+ * flushed when necessary.
+ */
+static int slub_cpu_dead(unsigned int cpu)
+{
+       struct kmem_cache *s;
+       unsigned long flags;
+
+       mutex_lock(&slab_mutex);
+       list_for_each_entry(s, &slab_caches, list) {
+               local_irq_save(flags);
+               __flush_cpu_slab(s, cpu);
+               local_irq_restore(flags);
+       }
+       mutex_unlock(&slab_mutex);
+       return 0;
+}
+
 /*
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
@@ -4144,9 +4159,8 @@ void __init kmem_cache_init(void)
        /* Setup random freelists for each cache */
        init_freelist_randomization();
 
-#ifdef CONFIG_SMP
-       register_cpu_notifier(&slab_notifier);
-#endif
+       cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
+                                 slub_cpu_dead);
 
        pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
                cache_line_size(),
@@ -4210,43 +4224,6 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
        return err;
 }
 
-#ifdef CONFIG_SMP
-/*
- * Use the cpu notifier to insure that the cpu slabs are flushed when
- * necessary.
- */
-static int slab_cpuup_callback(struct notifier_block *nfb,
-               unsigned long action, void *hcpu)
-{
-       long cpu = (long)hcpu;
-       struct kmem_cache *s;
-       unsigned long flags;
-
-       switch (action) {
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               mutex_lock(&slab_mutex);
-               list_for_each_entry(s, &slab_caches, list) {
-                       local_irq_save(flags);
-                       __flush_cpu_slab(s, cpu);
-                       local_irq_restore(flags);
-               }
-               mutex_unlock(&slab_mutex);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block slab_notifier = {
-       .notifier_call = slab_cpuup_callback
-};
-
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
        struct kmem_cache *s;
index 60a4045..7cf4121 100644 (file)
@@ -7,19 +7,8 @@
 #define CPU_DOWN_PREPARE       0x0005 /* CPU (unsigned)v going down */
 #define CPU_DOWN_FAILED                0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD               0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING              0x0008 /* CPU (unsigned)v not running any task,
-                                       * not handling interrupts, soon dead.
-                                       * Called on the dying cpu, interrupts
-                                       * are already disabled. Must not
-                                       * sleep, must not fail */
 #define CPU_POST_DEAD          0x0009 /* CPU (unsigned)v dead, cpu_hotplug
                                        * lock is dropped */
-#define CPU_STARTING           0x000A /* CPU (unsigned)v soon running.
-                                       * Called on the new cpu, just before
-                                       * enabling interrupts. Must not sleep,
-                                       * must not fail */
-#define CPU_DYING_IDLE         0x000B /* CPU (unsigned)v dying, reached
-                                       * idle loop. */
 #define CPU_BROKEN             0x000C /* CPU (unsigned)v did not die properly,
                                        * perhaps due to preemption. */
 #define CPU_TASKS_FROZEN       0x0010
@@ -30,5 +19,3 @@
 #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
 #define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
 #define CPU_DEAD_FROZEN                (CPU_DEAD | CPU_TASKS_FROZEN)
-#define CPU_DYING_FROZEN       (CPU_DYING | CPU_TASKS_FROZEN)
-#define CPU_STARTING_FROZEN    (CPU_STARTING | CPU_TASKS_FROZEN)
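
CPU_DYING, CPU_STARTING and their _FROZEN variants disappear because callbacks that must run on the affected CPU with interrupts disabled now live in the STARTING section of the state machine. A minimal sketch of the replacement shape, under an assumed enum entry; the _FROZEN distinction is recovered via cpuhp_tasks_frozen as shown for the error injector earlier:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    /* CPUHP_AP_EXAMPLE_STARTING stands in for a real enum entry. */
    static int example_starting(unsigned int cpu)   /* was CPU_STARTING */
    {
            /* runs on the incoming CPU, irqs disabled; must not sleep */
            return 0;
    }

    static int example_dying(unsigned int cpu)      /* was CPU_DYING */
    {
            /* runs on the outgoing CPU, irqs disabled; must not sleep */
            return 0;
    }

    static int __init example_init(void)
    {
            return cpuhp_setup_state_nocalls(CPUHP_AP_EXAMPLE_STARTING,
                                             "example:starting",
                                             example_starting,
                                             example_dying);
    }
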