Merge branch 'x86/ptrace' into x86/tsc
author    Ingo Molnar <mingo@elte.hu>
Tue, 23 Dec 2008 15:29:31 +0000 (16:29 +0100)
committer Ingo Molnar <mingo@elte.hu>
Tue, 23 Dec 2008 15:29:31 +0000 (16:29 +0100)
Conflicts:
arch/x86/kernel/cpu/intel.c

arch/x86/kernel/cpu/intel.c
arch/x86/kernel/ds.c

@@@ -41,16 -41,6 +41,16 @@@ static void __cpuinit early_init_intel(
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
  #endif
 +
 +      /*
 +       * c->x86_power is CPUID 8000_0007 edx. Bit 8 set means the TSC runs
 +       * at a constant rate with P/T states and does not stop in deep C-states.
 +       */
 +      if (c->x86_power & (1 << 8)) {
 +              set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 +              set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 +      }
 +
  }
  
  #ifdef CONFIG_X86_32
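
[Editor's note] The hunk above routes CPUID leaf 0x80000007 EDX bit 8 (the
invariant-TSC bit, cached in c->x86_power during early identification) into
the CONSTANT_TSC and NONSTOP_TSC feature flags. A standalone user-space
sketch of the same check, using GCC's <cpuid.h> wrapper rather than any
kernel code:

    #include <stdio.h>
    #include <cpuid.h>

    /* Sketch: read CPUID 0x80000007 and test EDX bit 8, the bit the
     * kernel caches in c->x86_power.  When set, the TSC ticks at a
     * constant rate across P-/T-state transitions and keeps counting
     * in deep C-states. */
    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
                    return 1;       /* extended leaf not available */

            printf("invariant TSC: %s\n",
                   (edx & (1u << 8)) ? "yes" : "no");
            return 0;
    }
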
@@@ -252,13 -242,6 +252,13 @@@ static void __cpuinit init_intel(struc
  
        intel_workarounds(c);
  
 +      /*
 +       * Detect the extended topology information if available. This
 +       * will reinitialise the initial_apicid which will be used
 +       * in init_intel_cacheinfo()
 +       */
 +      detect_extended_topology(c);
 +
        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
+ #endif
  
        if (cpu_has_bts)
                ptrace_bts_init_intel(c);
  
- #endif
 -      detect_extended_topology(c);
        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
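
[Editor's note] On the intel.c side, the conflict resolution keeps
detect_extended_topology() ahead of init_intel_cacheinfo(), because the
latter consumes the initial_apicid that the former may rewrite from CPUID
leaf 0xB; the old call site after the #endif (removed above) ran too late.
A condensed sketch of the resulting order in init_intel(), where
legacy_topology() is a placeholder name for the leaf-0x1/0x4 fallback, not
a real kernel symbol:

    detect_extended_topology(c);    /* leaf 0xB: may set initial_apicid
                                       and X86_FEATURE_XTOPOLOGY */
    l2 = init_intel_cacheinfo(c);   /* reads c->initial_apicid */

    if (!cpu_has(c, X86_FEATURE_XTOPOLOGY))
            legacy_topology(c);     /* fall back to CPUID leaves 0x1/0x4 */
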
diff --combined arch/x86/kernel/ds.c
@@@ -21,6 -21,8 +21,6 @@@
   */
  
  
 -#ifdef CONFIG_X86_DS
 -
  #include <asm/ds.h>
  
  #include <linux/errno.h>
@@@ -209,15 -211,14 +209,15 @@@ static DEFINE_PER_CPU(struct ds_contex
  static inline struct ds_context *ds_get_context(struct task_struct *task)
  {
        struct ds_context *context;
 +      unsigned long irq;
  
 -      spin_lock(&ds_lock);
 +      spin_lock_irqsave(&ds_lock, irq);
  
        context = (task ? task->thread.ds_ctx : this_system_context);
        if (context)
                context->count++;
  
 -      spin_unlock(&ds_lock);
 +      spin_unlock_irqrestore(&ds_lock, irq);
  
        return context;
  }
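
[Editor's note] The ds_lock conversion seen here, and repeated through the
rest of the patch, is the standard irqsave pattern: once a lock can be
contended from interrupt context, interrupts must be disabled while it is
held, with the previous IRQ state saved in a caller-provided flags word.
In skeleton form:

    unsigned long irq;                      /* saved IRQ flags */

    spin_lock_irqsave(&ds_lock, irq);       /* disable IRQs, take lock */
    /* ... touch the refcounted ds_context ... */
    spin_unlock_irqrestore(&ds_lock, irq);  /* drop lock, restore IRQs */
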
  /*
   * Same as ds_get_context, but allocates the context and its DS
   * structure, if necessary; returns NULL if out of memory.
 - *
 - * pre: requires ds_lock to be held
   */
  static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  {
        struct ds_context **p_context =
                (task ? &task->thread.ds_ctx : &this_system_context);
        struct ds_context *context = *p_context;
 +      unsigned long irq;
  
        if (!context) {
                context = kzalloc(sizeof(*context), GFP_KERNEL);
 -
                if (!context)
                        return NULL;
  
                context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
                if (!context->ds) {
                        kfree(context);
                        return NULL;
                }
  
 -              *p_context = context;
 +              spin_lock_irqsave(&ds_lock, irq);
  
 -              context->this = p_context;
 -              context->task = task;
 +              if (*p_context) {
 +                      kfree(context->ds);
 +                      kfree(context);
 +
 +                      context = *p_context;
 +              } else {
 +                      *p_context = context;
  
 -              if (task)
 -                      set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 +                      context->this = p_context;
 +                      context->task = task;
  
 -              if (!task || (task == current))
 -                      wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
 +                      if (task)
 +                              set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
  
 -              get_tracer(task);
 +                      if (!task || (task == current))
 +                              wrmsrl(MSR_IA32_DS_AREA,
 +                                     (unsigned long)context->ds);
 +              }
 +              spin_unlock_irqrestore(&ds_lock, irq);
        }
  
        context->count++;
   */
  static inline void ds_put_context(struct ds_context *context)
  {
 +      unsigned long irq;
 +
        if (!context)
                return;
  
 -      spin_lock(&ds_lock);
 +      spin_lock_irqsave(&ds_lock, irq);
  
        if (--context->count)
                goto out;
        kfree(context->ds);
        kfree(context);
   out:
 -      spin_unlock(&ds_lock);
 +      spin_unlock_irqrestore(&ds_lock, irq);
  }
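
[Editor's note] ds_alloc_context() also changes shape above: GFP_KERNEL
allocations may sleep, so they cannot sit inside the irqsave critical
section. The function now allocates speculatively outside the lock, then
re-checks the shared pointer under the lock and frees the loser if another
CPU installed a context first. Schematically (simplified from the hunk,
not a drop-in replacement):

    new = kzalloc(sizeof(*new), GFP_KERNEL);  /* may sleep: no lock held */
    if (!new)
            return NULL;

    spin_lock_irqsave(&ds_lock, irq);
    if (*p_context) {
            kfree(new->ds);                   /* lost the race: discard ours */
            kfree(new);
            context = *p_context;
    } else {
            *p_context = new;                 /* won: publish the new context */
            context = new;
    }
    spin_unlock_irqrestore(&ds_lock, irq);
    context->count++;
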
  
  
@@@ -376,7 -368,6 +376,7 @@@ static int ds_request(struct task_struc
        struct ds_context *context;
        unsigned long buffer, adj;
        const unsigned long alignment = (1 << 3);
 +      unsigned long irq;
        int error = 0;
  
        if (!ds_cfg.sizeof_ds)
                return -EOPNOTSUPP;
  
  
 -      spin_lock(&ds_lock);
 -
 -      if (!check_tracer(task))
 -              return -EPERM;
 -
 -      error = -ENOMEM;
        context = ds_alloc_context(task);
        if (!context)
 +              return -ENOMEM;
 +
 +      spin_lock_irqsave(&ds_lock, irq);
 +
 +      error = -EPERM;
 +      if (!check_tracer(task))
                goto out_unlock;
  
 +      get_tracer(task);
 +
        error = -EALREADY;
        if (context->owner[qual] == current)
 -              goto out_unlock;
 +              goto out_put_tracer;
        error = -EPERM;
        if (context->owner[qual] != NULL)
 -              goto out_unlock;
 +              goto out_put_tracer;
        context->owner[qual] = current;
  
 -      spin_unlock(&ds_lock);
 +      spin_unlock_irqrestore(&ds_lock, irq);
  
  
        error = -ENOMEM;
   out_release:
        context->owner[qual] = NULL;
        ds_put_context(context);
 +      put_tracer(task);
 +      return error;
 +
 + out_put_tracer:
 +      spin_unlock_irqrestore(&ds_lock, irq);
 +      ds_put_context(context);
 +      put_tracer(task);
        return error;
  
   out_unlock:
 -      spin_unlock(&ds_lock);
 +      spin_unlock_irqrestore(&ds_lock, irq);
        ds_put_context(context);
        return error;
  }
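
[Editor's note] The reworked ds_request() unwinds exactly what it has
acquired, so the error paths split three ways. A condensed map of the
labels in the hunk above:

    /*
     * out_unlock:     unlock; drop the context reference
     *                 (check_tracer() failed before get_tracer())
     * out_put_tracer: unlock; drop the context reference; put_tracer()
     *                 (the buffer already had an owner)
     * out_release:    clear owner[qual]; drop the context reference;
     *                 put_tracer() (the lock was already released on
     *                 the success path when a later step failed)
     */
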
@@@ -819,21 -801,13 +819,21 @@@ static const struct ds_configuration ds
        .sizeof_ds    = sizeof(long) * 12,
        .sizeof_field = sizeof(long),
        .sizeof_rec[ds_bts]   = sizeof(long) * 3,
 +#ifdef __i386__
        .sizeof_rec[ds_pebs]  = sizeof(long) * 10
 +#else
 +      .sizeof_rec[ds_pebs]  = sizeof(long) * 18
 +#endif
  };
  static const struct ds_configuration ds_cfg_64 = {
        .sizeof_ds    = 8 * 12,
        .sizeof_field = 8,
        .sizeof_rec[ds_bts]   = 8 * 3,
 +#ifdef __i386__
        .sizeof_rec[ds_pebs]  = 8 * 10
 +#else
 +      .sizeof_rec[ds_pebs]  = 8 * 18
 +#endif
  };
  
  static inline void
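
[Editor's note] The #ifdef __i386__ additions encode the PEBS record
layout difference between compilation targets: 10 fields per record on
32-bit, 18 on 64-bit (presumably covering the extra 64-bit register
state). With ds_cfg_64's fixed 8-byte fields that works out to
8 * 10 = 80 bytes versus 8 * 18 = 144 bytes per record; ds_cfg_var scales
with sizeof(long) instead. A trivial sketch of how these sizes would be
consumed, where pebs_buffer_bytes() is an illustrative helper and not
part of the patch:

    /* Illustrative only: bytes needed to buffer n PEBS records under a
     * given ds_configuration, using the sizeof_rec values set above. */
    static size_t pebs_buffer_bytes(const struct ds_configuration *cfg,
                                    size_t n)
    {
            return n * cfg->sizeof_rec[ds_pebs];
    }
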
@@@ -847,17 -821,16 +847,16 @@@ void __cpuinit ds_init_intel(struct cpu
        switch (c->x86) {
        case 0x6:
                switch (c->x86_model) {
+               case 0 ... 0xC:
+                       /* sorry, don't know about them */
+                       break;
                case 0xD:
                case 0xE: /* Pentium M */
                        ds_configure(&ds_cfg_var);
                        break;
-               case 0xF: /* Core2 */
-               case 0x1C: /* Atom */
+               default: /* Core2, Atom, ... */
                        ds_configure(&ds_cfg_64);
                        break;
-               default:
-                       /* sorry, don't know about them */
-                       break;
                }
                break;
        case 0xF:
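
[Editor's note] The model switch in ds_init_intel() is inverted by this
hunk: rather than whitelisting Core2 (0xF) and Atom (0x1C), every family-6
model above 0xE now defaults to the 64-bit DS layout, and models 0 through
0xC are skipped explicitly. The "case 0 ... 0xC:" label uses GCC's
case-range extension (used freely in kernel code) and is equivalent to
writing the thirteen labels out individually; in miniature:

    /* GCC case ranges: 'case LOW ... HIGH:' matches every value in the
     * inclusive range, exactly as if each label were written out. */
    switch (c->x86_model) {
    case 0 ... 0xC:                 /* thirteen labels in one */
            break;                  /* no known DS configuration */
    case 0xD:
    case 0xE:                       /* Pentium M */
            ds_configure(&ds_cfg_var);
            break;
    default:                        /* Core2 (0xF), Atom (0x1C), newer */
            ds_configure(&ds_cfg_64);
            break;
    }
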
@@@ -887,3 -860,4 +886,3 @@@ void ds_free(struct ds_context *context
        while (leftovers--)
                ds_put_context(context);
  }
 -#endif /* CONFIG_X86_DS */