/*
 * arch/x86/events/intel/uncore_snbep.c
 * (moved from arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c)
 */
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/*
 * SNB-EP Box level control register bits.  Each PMON "box" has a control
 * register that can reset and freeze all counters in the box at once.
 */
#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)        /* reset control registers */
#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)        /* reset counter registers */
#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)        /* freeze the box's counters */
#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)       /* enable the freeze capability */
/* Box init value: reset everything and enable freezing. */
#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control register bits */
#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff      /* event select, bits 0-7 */
#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00      /* unit mask, bits 8-15 */
#define SNBEP_PMON_CTL_RST              (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)       /* extended event select bit */
#define SNBEP_PMON_CTL_EN               (1 << 22)       /* counter enable */
#define SNBEP_PMON_CTL_INVERT           (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000      /* threshold, bits 24-31 */
/* Config bits a user may specify for a generic SNB-EP event. */
#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control: the Ubox threshold field is only 5 bits wide. */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

/* SNB-EP Cbo: generic mask plus the TID-enable bit. */
#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control (occupancy-counter select/invert/edge bits) */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SNB-EP QPI: generic mask plus the extended event select bit. */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register (PCI config space offsets) */
#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
#define SNBEP_PCI_PMON_CTL0                     0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0                     0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c

/* SNB-EP Ubox register (MSR addresses) */
#define SNBEP_U_MSR_PMON_CTR0                   0xc16
#define SNBEP_U_MSR_PMON_CTL0                   0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09

/* SNB-EP Cbo register (box 0; other boxes at SNBEP_CBO_MSR_OFFSET stride) */
#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
#define SNBEP_CBO_MSR_OFFSET                    0x20

/* Cbo filter register fields */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000

/*
 * Extra-reg table entry for events that use the Cbo filter register:
 * @e: event code, @m: config mask, @i: filter-field allocation index.
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
        .event = (e),                           \
        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
        .config_mask = (m),                     \
        .idx = (i)                              \
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd

/*
 * IVBEP event control.  Unlike SNB-EP, the init value does not set FRZ_EN
 * and the generic raw mask does not include the invert bit.
 */
#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox global control (freeze/unfreeze all boxes) */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbo filter fields (64-bit filter register) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract the i-th n-bit-wide field from x, preserving x's type. */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0                   0x709
#define HSWEP_U_MSR_PMON_CTL0                   0x705
#define HSWEP_U_MSR_PMON_FILTER                 0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704

/* Haswell-EP Ubox filter fields */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
                                        (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
                                         HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo (box 0; other boxes at HSWEP_CBO_MSR_OFFSET stride) */
#define HSWEP_C0_MSR_PMON_CTR0                  0xe08
#define HSWEP_C0_MSR_PMON_CTL0                  0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL               0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
#define HSWEP_CBO_MSR_OFFSET                    0x10


/* Haswell-EP CBo filter fields (64-bit filter register) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID       (0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID       (0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC       (0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6        (0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC        (0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0                  0x726
#define HSWEP_S0_MSR_PMON_CTL0                  0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL               0x720
#define HSWEP_SBOX_MSR_OFFSET                   0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0                 0x717
#define HSWEP_PCU_MSR_PMON_CTL0                 0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
                                                SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET                      0xc
#define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
                                         KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
#define KNL_UCLK_MSR_PMON_CTL0                  0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
#define KNL_PMON_FIXED_CTL_EN                   0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU: 7-bit event select (bits 0-6) and 6-bit threshold (bits 24-29). */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
                                (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
                                 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_CBO_PMON_CTL_TID_EN | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
266
/*
 * sysfs "format" attributes.  Each entry maps an attribute name to the
 * bit range of the perf_event_attr config field(s) that carries it:
 *   DEFINE_UNCORE_FORMAT_ATTR(<sysfs name>, <field>, "<config field>:<bits>")
 * Several widths of the same field exist (e.g. thresh5/6/8) because
 * different box types implement differently sized hardware fields.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
/* Filter fields live in config1; match/mask (QPI packet match) in config1/2. */
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
328
329 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
330 {
331         struct pci_dev *pdev = box->pci_dev;
332         int box_ctl = uncore_pci_box_ctl(box);
333         u32 config = 0;
334
335         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
336                 config |= SNBEP_PMON_BOX_CTL_FRZ;
337                 pci_write_config_dword(pdev, box_ctl, config);
338         }
339 }
340
341 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
342 {
343         struct pci_dev *pdev = box->pci_dev;
344         int box_ctl = uncore_pci_box_ctl(box);
345         u32 config = 0;
346
347         if (!pci_read_config_dword(pdev, box_ctl, &config)) {
348                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
349                 pci_write_config_dword(pdev, box_ctl, config);
350         }
351 }
352
353 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
354 {
355         struct pci_dev *pdev = box->pci_dev;
356         struct hw_perf_event *hwc = &event->hw;
357
358         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
359 }
360
361 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
362 {
363         struct pci_dev *pdev = box->pci_dev;
364         struct hw_perf_event *hwc = &event->hw;
365
366         pci_write_config_dword(pdev, hwc->config_base, hwc->config);
367 }
368
369 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
370 {
371         struct pci_dev *pdev = box->pci_dev;
372         struct hw_perf_event *hwc = &event->hw;
373         u64 count = 0;
374
375         pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
376         pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
377
378         return count;
379 }
380
381 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
382 {
383         struct pci_dev *pdev = box->pci_dev;
384         int box_ctl = uncore_pci_box_ctl(box);
385
386         pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
387 }
388
389 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
390 {
391         u64 config;
392         unsigned msr;
393
394         msr = uncore_msr_box_ctl(box);
395         if (msr) {
396                 rdmsrl(msr, config);
397                 config |= SNBEP_PMON_BOX_CTL_FRZ;
398                 wrmsrl(msr, config);
399         }
400 }
401
402 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
403 {
404         u64 config;
405         unsigned msr;
406
407         msr = uncore_msr_box_ctl(box);
408         if (msr) {
409                 rdmsrl(msr, config);
410                 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
411                 wrmsrl(msr, config);
412         }
413 }
414
415 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
416 {
417         struct hw_perf_event *hwc = &event->hw;
418         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
419
420         if (reg1->idx != EXTRA_REG_NONE)
421                 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
422
423         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
424 }
425
426 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
427                                         struct perf_event *event)
428 {
429         struct hw_perf_event *hwc = &event->hw;
430
431         wrmsrl(hwc->config_base, hwc->config);
432 }
433
434 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
435 {
436         unsigned msr = uncore_msr_box_ctl(box);
437
438         if (msr)
439                 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
440 }
441
/* Generic SNB-EP format attributes (8-bit threshold). */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox: same as generic but with the 5-bit threshold field. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbo: adds tid_en and the TID/NID/state/opcode filter fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

/* PCU: occupancy select/invert/edge plus four frequency band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI: extended event select plus the packet match/mask fields. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
515
/*
 * Named IMC events.  The cas_count scale 6.103515625e-5 equals 64/2^20,
 * i.e. the count is reported in MiB (presumably 64 bytes per CAS —
 * matches the "MiB" unit below).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* Named QPI events; 0x1xx event codes use the extended event select bit. */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
534
/* sysfs "format" groups wrapping the attribute lists above. */
static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
559
560 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
561         .disable_box    = snbep_uncore_msr_disable_box,         \
562         .enable_box     = snbep_uncore_msr_enable_box,          \
563         .disable_event  = snbep_uncore_msr_disable_event,       \
564         .enable_event   = snbep_uncore_msr_enable_event,        \
565         .read_counter   = uncore_msr_read_counter
566
567 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
568         __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
569         .init_box       = snbep_uncore_msr_init_box             \
570
571 static struct intel_uncore_ops snbep_uncore_msr_ops = {
572         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
573 };
574
575 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
576         .init_box       = snbep_uncore_pci_init_box,            \
577         .disable_box    = snbep_uncore_pci_disable_box,         \
578         .enable_box     = snbep_uncore_pci_enable_box,          \
579         .disable_event  = snbep_uncore_pci_disable_event,       \
580         .read_counter   = snbep_uncore_pci_read_counter
581
582 static struct intel_uncore_ops snbep_uncore_pci_ops = {
583         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
584         .enable_event   = snbep_uncore_pci_enable_event,        \
585 };
586
/*
 * Cbox event constraints: maps event code -> bitmask of counters allowed
 * to count it.  0x1f may overlap several masks, hence the OVERLAP entry.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
616
/* R2PCIe event constraints (event code -> allowed counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
630
/* R3QPI event constraints (event code -> allowed counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
662
/*
 * SNB-EP Ubox: a single box with two 44-bit general counters and one
 * 48-bit fixed (uncore clock) counter, all accessed via MSRs.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
677
/*
 * Cbox events that need the shared filter register.  Each entry maps an
 * event+umask value (matched under config_mask) to an idx bitmask used
 * by get/put_constraint to allocate fields of the filter register.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
706
707 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
708 {
709         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
710         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
711         int i;
712
713         if (uncore_box_is_fake(box))
714                 return;
715
716         for (i = 0; i < 5; i++) {
717                 if (reg1->alloc & (0x1 << i))
718                         atomic_sub(1 << (i * 6), &er->ref);
719         }
720         reg1->alloc = 0;
721 }
722
/*
 * Try to claim the shared C-box filter-register fields this event needs.
 *
 * reg1->idx is a bitmask of up to five filter fields; er->ref packs a
 * 6-bit reference count per field.  A field can be shared only when it
 * is free or already programmed with an identical value under the
 * field's mask (provided by @cbox_filter_mask).  Returns NULL on
 * success (no extra scheduling constraint) or uncore_constraint_empty
 * when a needed field is held with a conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* a real event keeps fields it already allocated earlier */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* field is unreferenced, or holds an identical filter value */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* remember what was taken so put_constraint can release it */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* roll back only the references taken in this call */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
769
770 static u64 snbep_cbox_filter_mask(int fields)
771 {
772         u64 mask = 0;
773
774         if (fields & 0x1)
775                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
776         if (fields & 0x2)
777                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
778         if (fields & 0x4)
779                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
780         if (fields & 0x8)
781                 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
782
783         return mask;
784 }
785
/* C-box constraint handling using the SNB-EP filter field layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
791
792 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
793 {
794         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
795         struct extra_reg *er;
796         int idx = 0;
797
798         for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
799                 if (er->event != (event->hw.config & er->config_mask))
800                         continue;
801                 idx |= er->idx;
802         }
803
804         if (idx) {
805                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
806                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
807                 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
808                 reg1->idx = idx;
809         }
810         return 0;
811 }
812
/* C-box MSR ops: common handlers plus filter-aware constraint hooks. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* SNB-EP C-box PMON: 8 boxes of 4 counters, one shared filter register. */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
835
836 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
837 {
838         struct hw_perf_event *hwc = &event->hw;
839         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
840         u64 config = reg1->config;
841
842         if (new_idx > reg1->idx)
843                 config <<= 8 * (new_idx - reg1->idx);
844         else
845                 config >>= 8 * (reg1->idx - new_idx);
846
847         if (modify) {
848                 hwc->config += new_idx - reg1->idx;
849                 reg1->config = config;
850                 reg1->idx = new_idx;
851         }
852         return config;
853 }
854
/*
 * Claim one byte lane of the shared PCU filter MSR.
 *
 * er->ref packs an 8-bit reference count per lane.  If the preferred
 * lane (reg1->idx) is held with a conflicting band value, try the
 * other three lanes in turn, relocating the event's filter value via
 * snbep_pcu_alter_er().  Returns NULL on success, or
 * uncore_constraint_empty when all four lanes conflict.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* nothing to claim, or this (real) event already holds a lane */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* lane is unreferenced, or holds an identical band value */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* try the next lane; give up after cycling through all four */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the lane switch and record the allocation */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
896
897 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
898 {
899         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
900         struct intel_uncore_extra_reg *er = &box->shared_regs[0];
901
902         if (uncore_box_is_fake(box) || !reg1->alloc)
903                 return;
904
905         atomic_sub(1 << (reg1->idx * 8), &er->ref);
906         reg1->alloc = 0;
907 }
908
909 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
910 {
911         struct hw_perf_event *hwc = &event->hw;
912         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
913         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
914
915         if (ev_sel >= 0xb && ev_sel <= 0xe) {
916                 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
917                 reg1->idx = ev_sel - 0xb;
918                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
919         }
920         return 0;
921 }
922
/* PCU MSR ops: common handlers plus band-filter constraint hooks. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNB-EP power control unit PMON: one box, one shared filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* All MSR-based SNB-EP uncore PMUs, NULL-terminated. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
950
951 void snbep_uncore_cpu_init(void)
952 {
953         if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
954                 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
955         uncore_msr_uncores = snbep_msr_uncores;
956 }
957
/* Indices into uncore_extra_pci_dev[] for auxiliary (non-PMU) devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
963
964 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
965 {
966         struct hw_perf_event *hwc = &event->hw;
967         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
968         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
969
970         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
971                 reg1->idx = 0;
972                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
973                 reg1->config = event->attr.config1;
974                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
975                 reg2->config = event->attr.config2;
976         }
977         return 0;
978 }
979
/*
 * Enable a QPI event.  If the event uses packet matching, first
 * program the 64-bit match/mask registers, which live on a separate
 * per-port "filter" PCI device rather than on the PMON device itself.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
		/* the filter device may not have been discovered at probe time */
		if (filter_pdev) {
			/* each 64-bit value is written as two dwords, low first */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1004
/* QPI PCI ops: common handlers plus match/mask filter programming. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Register layout shared by all SNB-EP PCI-based PMON boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1020
/* Home Agent PMON */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Integrated memory controller PMON: four channels, fixed DCLK counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI link PMON: uses its own ops/format for packet match/mask support. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe ring-to-PCIe agent PMON */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI agent PMON */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1074
/* Indices into snbep_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* All PCI-based SNB-EP uncore PMUs, NULL-terminated. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1091
/*
 * PCI IDs of all SNB-EP uncore PMON devices, plus the two QPI filter
 * devices that are stashed in uncore_extra_pci_dev[] (UNCORE_EXTRA_PCI_DEV)
 * rather than bound to a PMU.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1145
/* Driver skeleton: only name and ID table are filled in here. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1150
/*
 * Build the PCI bus number -> physical socket id mapping.
 *
 * Walks every UBOX PCI device (one per socket, matched by @devid),
 * reads its local Node ID (config offset 0x40) and the Node ID mapping
 * register (offset 0x54), and records which package each UBOX's bus
 * belongs to.  Buses without a UBOX then inherit the mapping of the
 * next higher bus that has one.  Returns 0 on success or a negative
 * errno.
 */
static int snbep_pci2phy_map_init(int devid)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, 0x40, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, 0x54, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			/* walk downwards so each hole copies its upper neighbour */
			for (bus = 255; bus >= 0; bus--) {
				if (map->pbus_to_physid[bus] >= 0)
					i = map->pbus_to_physid[bus];
				else
					map->pbus_to_physid[bus] = i;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* drop the reference still held after the loop exits */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1222
1223 int snbep_uncore_pci_init(void)
1224 {
1225         int ret = snbep_pci2phy_map_init(0x3ce0);
1226         if (ret)
1227                 return ret;
1228         uncore_pci_uncores = snbep_pci_uncores;
1229         uncore_pci_driver = &snbep_uncore_pci_driver;
1230         return 0;
1231 }
1232 /* end of Sandy Bridge-EP uncore support */
1233
1234 /* IvyTown uncore support */
1235 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1236 {
1237         unsigned msr = uncore_msr_box_ctl(box);
1238         if (msr)
1239                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1240 }
1241
1242 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1243 {
1244         struct pci_dev *pdev = box->pci_dev;
1245
1246         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1247 }
1248
/* IvyTown MSR ops: SNB-EP handlers, but with the IvyTown box init value. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* IvyTown PCI ops: SNB-EP handlers, but with the IvyTown box init value. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* Register layout shared by the IvyTown PCI-based PMON boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1277
/* Default IvyTown event format: 8-bit threshold, no filters. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* U-box format: narrower 5-bit threshold. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-box format: adds tid_en and the full set of filter fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/edge/invert plus four band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: packet match/mask fields from config1/config2. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1352
/* sysfs "format" directories for each IvyTown box flavour. */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1377
/*
 * IvyTown U-box PMON: same register layout as SNB-EP, with the
 * IvyTown raw event mask and ops.
 */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1392
/*
 * Filter-register usage table for the IvyTown C-box.
 *
 * The idx bitmask is decoded by ivbep_cbox_filter_mask():
 * 0x1 = TID, 0x2 = LINK, 0x4 = STATE, 0x8 = NID,
 * 0x10 = OPC (which also covers the NC/C6/ISOC match bits).
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	/* the tid_en control bit always claims the TID field */
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1433
1434 static u64 ivbep_cbox_filter_mask(int fields)
1435 {
1436         u64 mask = 0;
1437
1438         if (fields & 0x1)
1439                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1440         if (fields & 0x2)
1441                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1442         if (fields & 0x4)
1443                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1444         if (fields & 0x8)
1445                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1446         if (fields & 0x10) {
1447                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1448                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1449                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1450                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1451         }
1452
1453         return mask;
1454 }
1455
1456 static struct event_constraint *
1457 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1458 {
1459         return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1460 }
1461
1462 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1463 {
1464         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1465         struct extra_reg *er;
1466         int idx = 0;
1467
1468         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1469                 if (er->event != (event->hw.config & er->config_mask))
1470                         continue;
1471                 idx |= er->idx;
1472         }
1473
1474         if (idx) {
1475                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1476                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1477                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1478                 reg1->idx = idx;
1479         }
1480         return 0;
1481 }
1482
/* Program the C-box filter (if used) and then enable the counter. */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/*
	 * The 64-bit shared filter value is split across two MSRs: low
	 * half at reg1->reg, high half at reg1->reg + 6 (presumably the
	 * IVB-EP FILTER1 MSR sits 6 above FILTER0 — confirm against the
	 * uncore monitoring guide).
	 */
	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1496
/* IVB-EP C-box ops: SNB-EP MSR ops plus filter-aware config/enable. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1508
/*
 * IVB-EP C-box (LLC coherence) PMU: up to 15 boxes, 4 counters each,
 * 44-bit counters, one shared filter register per box.
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1524
/* IVB-EP PCU ops: common MSR ops plus SNB-EP PCU config/constraints. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1531
/* IVB-EP power control unit PMU: single box, 4 x 48-bit counters. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1545
/* NULL-terminated list of IVB-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1552
1553 void ivbep_uncore_cpu_init(void)
1554 {
1555         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1556                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1557         uncore_msr_uncores = ivbep_msr_uncores;
1558 }
1559
/* IVB-EP home agent PMU: two PCI boxes, 4 x 48-bit counters each. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1567
/*
 * IVB-EP integrated memory controller PMU: 8 channel boxes, each with
 * 4 general counters plus a fixed (DCLK) counter.
 */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1579
/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter PCI config offsets are table-driven (indexed by
 * hwc->idx) instead of computed from a base + stride.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1583
1584 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1585 {
1586         struct pci_dev *pdev = box->pci_dev;
1587         struct hw_perf_event *hwc = &event->hw;
1588
1589         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1590                                hwc->config | SNBEP_PMON_CTL_EN);
1591 }
1592
1593 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1594 {
1595         struct pci_dev *pdev = box->pci_dev;
1596         struct hw_perf_event *hwc = &event->hw;
1597
1598         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1599 }
1600
1601 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1602 {
1603         struct pci_dev *pdev = box->pci_dev;
1604         struct hw_perf_event *hwc = &event->hw;
1605         u64 count = 0;
1606
1607         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1608         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1609
1610         return count;
1611 }
1612
/* IRP ops: generic PCI box control, but unaligned per-counter accessors. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
1621
/*
 * IVB-EP IRP (IIO ring port) PMU.  Note: no .perf_ctr/.event_ctl —
 * register addresses come from the offset tables via the IRP ops.
 */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1632
/* QPI ops: SNB-EP PCI ops plus match/mask extra-register handling. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1644
/* IVB-EP QPI link-layer PMU: 3 port boxes, one shared match/mask reg. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1658
/* IVB-EP R2PCIe (ring-to-PCIe) PMU: one box, 44-bit counters. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1667
/* IVB-EP R3QPI (ring-to-QPI) PMU: two boxes, 3 x 44-bit counters each. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1676
/* Indices into ivbep_pci_uncores[], matched by the PCI id table below. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1685
/* NULL-terminated list of IVB-EP PCI-based uncore PMU types. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1695
/*
 * PCI device ids for the IVB-EP uncore units.  driver_data encodes the
 * (type index, instance index) pair.  Note the IMC instances are
 * numbered densely 0-7 even though the hardware channels are 0/1/3/4.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1781
/* Probe-less driver handle used by the uncore core to claim the ids. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1786
1787 int ivbep_uncore_pci_init(void)
1788 {
1789         int ret = snbep_pci2phy_map_init(0x0e1e);
1790         if (ret)
1791                 return ret;
1792         uncore_pci_uncores = ivbep_pci_uncores;
1793         uncore_pci_driver = &ivbep_uncore_pci_driver;
1794         return 0;
1795 }
1796 /* end of IvyTown uncore support */
1797
1798 /* KNL uncore support */
/* sysfs format attributes exposed for the KNL ubox PMU. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
1808
/* Groups the ubox format attributes under the "format" sysfs directory. */
static struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
1813
/* KNL ubox PMU: reuses the HSW-EP ubox MSR layout with a KNL event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1828
/* sysfs format attributes for the KNL CHA PMU, including filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
1848
/* Groups the CHA format attributes under the "format" sysfs directory. */
static struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
1853
/* KNL CHA events restricted to counter 0 (events 0x11, 0x1f, 0x36). */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
1860
/*
 * Events that require the CHA filter register; the idx value selects
 * which filter-mask bits apply (see knl_cha_filter_mask()).
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
1869
1870 static u64 knl_cha_filter_mask(int fields)
1871 {
1872         u64 mask = 0;
1873
1874         if (fields & 0x1)
1875                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1876         if (fields & 0x2)
1877                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1878         if (fields & 0x4)
1879                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1880         return mask;
1881 }
1882
/* CHA constraint lookup via the generic SNB-EP C-box helper. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1888
/*
 * Gather the filter index bits this event needs and, if any, record
 * the per-box FILTER0 MSR address and masked config1 value in the
 * event's extra_reg.  Mirrors ivbep_cbox_hw_config().
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Each CHA has its own filter MSR, KNL_CHA_MSR_OFFSET apart. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
1910
/* Forward declaration: the KNL CHA ops reuse the HSW-EP cbox enable path. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);
1913
/* KNL CHA ops: SNB-EP MSR box control with KNL filter handling. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1925
/* KNL caching/home agent PMU: up to 38 boxes, one shared filter each. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
1941
/* sysfs format attributes for the KNL PCU, including occupancy fields. */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};
1954
/* Groups the PCU format attributes under the "format" sysfs directory. */
static struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
1959
/* KNL power control unit PMU: single box on the HSW-EP PCU MSR layout. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
1972
/* NULL-terminated list of KNL MSR-based uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
1979
/* Register the KNL MSR-based uncore types with the uncore core. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
1984
/*
 * Enable a KNL IMC box by writing 0 to its box control register
 * (presumably clearing the freeze bits — confirm against the KNL
 * box-control layout; this differs from the generic SNB-EP path).
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
1992
1993 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
1994                                         struct perf_event *event)
1995 {
1996         struct pci_dev *pdev = box->pci_dev;
1997         struct hw_perf_event *hwc = &event->hw;
1998
1999         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2000                                                         == UNCORE_FIXED_EVENT)
2001                 pci_write_config_dword(pdev, hwc->config_base,
2002                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2003         else
2004                 pci_write_config_dword(pdev, hwc->config_base,
2005                                        hwc->config | SNBEP_PMON_CTL_EN);
2006 }
2007
/* Shared ops for all KNL IMC/EDC PCI boxes (custom enable paths). */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2016
/* KNL memory-controller UCLK PMU: 2 boxes, 4 counters + 1 fixed each. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2032
/* KNL memory-controller DCLK-channel PMU: 6 boxes (3 channels x 2 MCs). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2048
/* KNL EDC (MCDRAM) UCLK PMU: 8 boxes, same register layout as imc_uclk. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2064
/* KNL EDC (MCDRAM) ECLK PMU: 8 boxes with dedicated ECLK registers. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2080
/* KNL M2PCIe: event 0x23 is restricted to counters 0-1. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2085
/* KNL M2PCIe PMU: one PCI box using the common SNB-EP PCI layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2094
/* sysfs format attributes exposed for the KNL IRP PMU. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2104
/* Groups the IRP format attributes under the "format" sysfs directory. */
static struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2109
/* KNL IRP PMU: one PCI box, 2 counters, standard SNB-EP PCI registers. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2122
/* Indices into knl_pci_uncores[], matched by the PCI id table below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2131
/* NULL-terminated list of KNL PCI-based uncore PMU types. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2141
2142 /*
2143  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2144  * device type. prior to KNL, each instance of a PMU device type had a unique
2145  * device ID.
2146  *
2147  *      PCI Device ID   Uncore PMU Devices
2148  *      ----------------------------------
2149  *      0x7841          MC0 UClk, MC1 UClk
2150  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2151  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2152  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2153  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2154  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2155  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2156  *      0x7817          M2PCIe
2157  *      0x7814          IRP
2158 */
2159
/*
 * PCI device ids for the KNL uncore units.  Because KNL reuses one id
 * per device type (see the comment above), each entry uses instance
 * index 0; the uncore core enumerates the actual instances.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC DClk Channel */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* EDC UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2187
/*
 * KNL uncore PCI driver.  Only the ID table is filled in here;
 * NOTE(review): .probe/.remove appear to be supplied by the common
 * uncore PCI code after knl_uncore_pci_init() publishes this driver
 * via uncore_pci_driver — confirm against uncore.c.
 */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2192
2193 int knl_uncore_pci_init(void)
2194 {
2195         int ret;
2196
2197         /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2198         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2199         if (ret)
2200                 return ret;
2201         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2202         if (ret)
2203                 return ret;
2204         uncore_pci_uncores = knl_pci_uncores;
2205         uncore_pci_driver = &knl_uncore_pci_driver;
2206         return 0;
2207 }
2208
2209 /* end of KNL uncore support */
2210
2211 /* Haswell-EP uncore support */
/* sysfs "format" attributes exposed for the HSW-EP Ubox PMON events. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,		/* Ubox threshold is only 5 bits wide */
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2222
/* Groups the Ubox format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2227
2228 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2229 {
2230         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2231         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2232         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2233         reg1->idx = 0;
2234         return 0;
2235 }
2236
/*
 * Ubox MSR ops: the common SNB-EP MSR accessors plus a hw_config hook
 * that programs the Ubox filter, and the generic shared-register
 * get/put constraint helpers (the filter register is a shared reg).
 */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2243
/*
 * HSW-EP Ubox (system configuration controller) PMU: two 44-bit
 * general-purpose counters plus a 48-bit fixed (UCLK) counter, one box
 * per package, with a single shared filter register.
 */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2259
/*
 * sysfs "format" attributes for the HSW-EP Cbox PMON events, including
 * the filter fields (tid/link/state/nid/opc/nc/c6/isoc) that map onto
 * the per-Cbox filter registers.
 */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2276
/* Groups the Cbox format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2281
/*
 * Cbox counter constraints: each entry restricts an event code to the
 * subset of counters encoded in the bitmask (e.g. 0x1 = counter 0 only,
 * 0x3 = counters 0-1).
 */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2292
/*
 * Cbox extra-register table: each entry matches an event+umask pattern
 * (first two arguments: value and mask over the event config) and ORs
 * the third argument into the filter-field index consumed by
 * hswep_cbox_filter_mask() / hswep_cbox_hw_config().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2334
2335 static u64 hswep_cbox_filter_mask(int fields)
2336 {
2337         u64 mask = 0;
2338         if (fields & 0x1)
2339                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2340         if (fields & 0x2)
2341                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2342         if (fields & 0x4)
2343                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2344         if (fields & 0x8)
2345                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2346         if (fields & 0x10) {
2347                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2348                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2349                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2350                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2351         }
2352         return mask;
2353 }
2354
/*
 * Cbox constraint hook: defer to the common SNB-EP Cbox logic,
 * parameterized with the HSW-EP filter-mask translation.
 */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2360
2361 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2362 {
2363         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2364         struct extra_reg *er;
2365         int idx = 0;
2366
2367         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2368                 if (er->event != (event->hw.config & er->config_mask))
2369                         continue;
2370                 idx |= er->idx;
2371         }
2372
2373         if (idx) {
2374                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2375                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2376                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2377                 reg1->idx = idx;
2378         }
2379         return 0;
2380 }
2381
/*
 * Enable a Cbox event: if the event uses a filter, program the filter
 * value first, then set the enable bit in the event control register.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* The 64-bit filter value spans two consecutive MSRs. */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2396
/*
 * Cbox MSR ops: SNB-EP box management combined with HSW-EP specific
 * event enable (filter programming) and hw_config/constraint hooks.
 */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2408
/*
 * HSW-EP Cbox (LLC coherence engine) PMU: up to 18 boxes (trimmed to
 * the actual core count in hswep_uncore_cpu_init()), four 48-bit
 * counters each, with one shared filter register per box.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2424
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/*
		 * Accumulate the init bits one at a time, writing the MSR
		 * after each bit is added, instead of writing the full
		 * mask in one shot (which can trigger a spurious #GP).
		 */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2443
/*
 * Sbox MSR ops: the common SNB-EP accessors, but with the bit-by-bit
 * init_box workaround above instead of the default one.
 */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2448
/* sysfs "format" attributes for the HSW-EP Sbox PMON events. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2458
/* Groups the Sbox format attributes under the "format" sysfs directory. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2463
/*
 * HSW-EP Sbox (QPI ring stop) PMU: four boxes by default (reduced to
 * two on 6-8 core parts by hswep_uncore_cpu_init()), four 44-bit
 * counters each.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2477
2478 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2479 {
2480         struct hw_perf_event *hwc = &event->hw;
2481         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2482         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2483
2484         if (ev_sel >= 0xb && ev_sel <= 0xe) {
2485                 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2486                 reg1->idx = ev_sel - 0xb;
2487                 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2488         }
2489         return 0;
2490 }
2491
/*
 * PCU MSR ops: common SNB-EP accessors with the HSW-EP band-filter
 * hw_config and the SNB-EP PCU shared-register constraint handling.
 */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2498
/*
 * HSW-EP PCU (power control unit) PMU: one box per package, four
 * 48-bit counters, one shared filter register; reuses the SNB-EP PCU
 * format group.
 */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2512
/* NULL-terminated list of all MSR-based HSW-EP uncore PMU types. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2520
/*
 * Register the MSR-based HSW-EP uncore PMUs, trimming the Cbox count to
 * the actual number of cores and the Sbox count on small parts.
 */
void hswep_uncore_cpu_init(void)
{
	/* There is one Cbox per core; never expose more than that. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/* CAPID4 capability register, offset 0x94 in PCU.3 config space */
		pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		/* Bits 7:6 == 0 indicates only SBOX0/1 are present. */
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2538
/* HSW-EP HA (home agent) PMU: two boxes, five 48-bit counters each. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2546
/*
 * Pre-defined IMC events.  The cas_count_* scale 6.103515625e-5 is
 * exactly 64 / 2^20, converting 64-byte CAS transactions into MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2557
/*
 * HSW-EP IMC (memory controller channel) PMU: eight boxes, five 48-bit
 * counters plus a 48-bit fixed (DCLK) counter each.
 */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2569
/* PCI config-space offsets of the four IRP counters (counter 0..3). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2571
/*
 * Read an IRP counter: each counter is a 64-bit value exposed as two
 * consecutive 32-bit PCI config registers.  The low dword is read into
 * the low half of count and the high dword into the high half (relies
 * on x86 little-endian layout of the u64).
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
2583
/*
 * IRP PCI ops: SNB-EP box management, IVB-EP event enable/disable, and
 * a custom read_counter since the IRP counters live at non-standard
 * config-space offsets (see hswep_uncore_irp_ctrs[]).
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2592
/*
 * HSW-EP IRP (IIO ring port) PMU.  No .perf_ctr/.event_ctl here: the
 * counters are accessed through the custom ops above.
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2603
/*
 * HSW-EP QPI port PMU: three boxes, five 48-bit counters each, with a
 * shared match/mask register pair; reuses the SNB-EP QPI ops and
 * format group.
 */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 5,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2617
/* R2PCIe counter constraints (event code -> allowed counter bitmask). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2639
/* HSW-EP R2PCIe (ring-to-PCIe interface) PMU: one box, four counters. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2648
/* R3QPI counter constraints (event code -> allowed counter bitmask). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2685
/* HSW-EP R3QPI (ring-to-QPI interface) PMU: three boxes, 44-bit counters. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2694
/* Indices into hswep_pci_uncores[], encoded into pci_device_id.driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2703
/* NULL-terminated list of all PCI-based HSW-EP uncore PMU types. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2713
/*
 * PCI device ID table for the HSW-EP uncore PMON units.  driver_data
 * packs the hswep_pci_uncores[] index with the box instance number.
 * The UNCORE_EXTRA_PCI_DEV entries are not PMUs themselves but devices
 * whose config space other code needs (QPI port filters, PCU.3
 * capability registers).
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2804
/*
 * HSW-EP uncore PCI driver.  Only the ID table is filled in here;
 * NOTE(review): .probe/.remove appear to be supplied by the common
 * uncore PCI code once this driver is published — confirm in uncore.c.
 */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2809
2810 int hswep_uncore_pci_init(void)
2811 {
2812         int ret = snbep_pci2phy_map_init(0x2f1e);
2813         if (ret)
2814                 return ret;
2815         uncore_pci_uncores = hswep_pci_uncores;
2816         uncore_pci_driver = &hswep_uncore_pci_driver;
2817         return 0;
2818 }
2819 /* end of Haswell-EP uncore support */
2820
2821 /* BDX uncore support */
2822
/*
 * BDX Ubox PMU: same register layout as HSW-EP but with 48-bit general
 * counters; reuses the IVB-EP MSR ops and Ubox format group.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
2838
/* BDX Cbox counter constraints (event code -> allowed counter bitmask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2846
/*
 * BDX Cbox PMU: up to 24 boxes (trimmed to the actual core count in
 * bdx_uncore_cpu_init()); shares the HSW-EP register layout, ops and
 * format group, with BDX-specific constraints.
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2862
/*
 * BDX Sbox PMU: same layout as HSW-EP (including the bit-by-bit
 * init_box workaround in hswep_uncore_sbox_msr_ops) but with 48-bit
 * counters.
 */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2876
/* NULL-terminated list of all MSR-based BDX uncore PMU types. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&bdx_uncore_sbox,
	&hswep_uncore_pcu,	/* PCU is identical to HSW-EP */
	NULL,
};
2884
2885 void bdx_uncore_cpu_init(void)
2886 {
2887         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2888                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2889         uncore_msr_uncores = bdx_msr_uncores;
2890 }
2891
/* BDX HA (home agent) PMU: two boxes, four 48-bit counters each. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2899
/*
 * BDX IMC PMU: same layout and pre-defined events (clockticks,
 * cas_count_*) as HSW-EP.
 */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2911
/*
 * BDX IRP PMU: reuses the HSW-EP IRP ops (non-standard counter offsets
 * in config space), so no .perf_ctr/.event_ctl are set here.
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2922
/* QPI link layer: 3 port boxes.  One shared register is reserved for the
 * QPI match/mask filter state handled by snbep_uncore_qpi_ops — see the
 * SNB-EP QPI ops/format group for the extra-reg details.
 */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2936
/*
 * Counter constraints for R2PCIe events:
 * UNCORE_EVENT_CONSTRAINT(event, cntmask) restricts the given event code
 * to the counters in the mask (0x1 = counter 0 only, 0x3 = counters 0-1).
 */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
2949
/* R2PCIe (ring-to-PCIe interface): single box with per-event counter
 * restrictions (see bdx_uncore_r2pcie_constraints).
 */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2958
/*
 * Counter constraints for R3QPI events:
 * UNCORE_EVENT_CONSTRAINT(event, cntmask) restricts the given event code
 * to the counters in the mask (0x1 = counter 0 only, 0x3 = counters 0-1,
 * 0x7 = counters 0-2).
 */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2992
/* R3QPI (ring-to-QPI interface): 3 boxes, only 3 counters each, with
 * per-event counter restrictions (see bdx_uncore_r3qpi_constraints).
 */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3001
/*
 * Indices into bdx_pci_uncores[]; these must stay in sync with the
 * UNCORE_PCI_DEV_DATA() type indices used in bdx_uncore_pci_ids[].
 */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3010
/* PCI-based uncore PMU types, indexed by BDX_PCI_UNCORE_*; NULL-terminated. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3020
/*
 * PCI device IDs of the BDX uncore PMU devices.  driver_data packs the
 * uncore type index (BDX_PCI_UNCORE_*) and the box instance id via
 * UNCORE_PCI_DEV_DATA().  The QPI port filter devices are not counter
 * boxes themselves; they are recorded as UNCORE_EXTRA_PCI_DEV entries —
 * presumably so the QPI filter handling can locate them (see the QPI ops).
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
	},
	{ /* end: all zeroes */ }
};
3108
/*
 * PCI driver skeleton for the BDX uncore devices.  No probe/remove
 * callbacks are supplied here; the uncore framework that consumes
 * uncore_pci_driver handles device setup itself.
 */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3113
3114 int bdx_uncore_pci_init(void)
3115 {
3116         int ret = snbep_pci2phy_map_init(0x6f1e);
3117
3118         if (ret)
3119                 return ret;
3120         uncore_pci_uncores = bdx_pci_uncores;
3121         uncore_pci_driver = &bdx_uncore_pci_driver;
3122         return 0;
3123 }
3124
3125 /* end of BDX uncore support */