#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

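/*
 * Table values are in PERF_CTL event-select format: bits [7:0] are the
 * event code and bits [15:8] the unit mask, e.g. 0x0267 = event 0x67,
 * umask 0x02. The generic x86 code treats 0 as "no matching event" and
 * -1 as "combination not supported".
 */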
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

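	/*
	 * GUESTONLY and HOSTONLY are AMD-specific event-select bits that
	 * restrict counting to guest or host mode when SVM is active
	 * (bits 40 and 41 of the event select, per the APM).
	 */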
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting USR = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
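	/* bits [35:32] of the config supply event code bits [11:8] */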
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
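	/* low-byte event codes 0xe0-0xff select NorthBridge events */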
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}

 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
  * of a package. Refer to BKDG section 3.12.
  *
  * NB events are events measuring L3 cache, Hypertransport
  * traffic. They are identified by an event code >= 0xe00.
  * They measure events on the NorthBridge which is shared
  * by all cores on a package. NB events are counted on a
  * shared set of counters. When an NB event is programmed
  * in a counter, the data actually comes from a shared
  * counter. Thus, access to those counters needs to be
  * synchronized.
  *
  * We implement the synchronization such that no two cores
  * can be measuring NB events using the same counters. Thus,
  * we maintain a per-NB allocation table. The available slot
  * is propagated using the event_constraint structure.
  *
  * We provide only one choice for each NB event based on
  * the fact that only NB events have restrictions. Consequently,
  * if a counter is available, there is a guarantee the NB event
  * will be assigned to it. If no slot is available, an empty
  * constraint is returned and scheduling will eventually fail
  * for this event.
  *
  * Note that all cores attached to the same NB compete for the same
  * counters to host NB events; this is why we use atomic ops. Some
  * multi-chip CPUs may have more than one NB.
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
  * calling amd_put_event_constraints().
  *
  * Non-NB events are not impacted by this restriction.
  */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
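	/* constraint i has only bit i set, so NB slot i maps 1:1 to counter i */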
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

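	/* family 15h does not use the shared NB scheme (NB counters not yet implemented) */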
	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

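	/* if an online sibling already has an amd_nb for this node, share it */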
	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

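/* the two-value macros below expand to GCC case ranges (low ... high) */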
#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */

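/*
 * The constraint masks select which of the six F15h counters an event may
 * use: 0x01 = PERF_CTL[0], 0x07 = PERF_CTL[2:0], 0x08 = PERF_CTL[3],
 * 0x3F = PERF_CTL[5:0], 0x38 = PERF_CTL[5:3]. PMC30 (0x09, counters 0
 * and 3) overlaps both the PMC20 and PMC53 ranges, hence the OVERLAP
 * variant.
 */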
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
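			/*
			 * with either umask nibble clear, break out to the
			 * default PERF_CTL[5:3] constraint; with both set,
			 * only PERF_CTL[3] may be used
			 */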
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS_F15H,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
	.cpu_starting		= amd_pmu_cpu_starting,
};

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If core performance counter extensions exist, this must be
	 * a family 15h CPU; otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

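	/*
	 * Stop masking out the host-only bit: with SVM active the hardware
	 * honors HO/GO, so events must keep their host/guest select bits
	 * (the mask is applied when the event select is written, see
	 * __x86_pmu_enable_event()).
	 */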
	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);