/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "cikd.h"
#include "cik.h"
#include "atom.h"
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_0_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_0_d.h"
#include "gmc/gmc_7_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define GFX7_NUM_GFX_RINGS     1
#define GFX7_NUM_COMPUTE_RINGS 8
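/*
 * The driver initializes one kernel-owned GFX ring and eight MEC-backed
 * compute rings. CIK's MECs expose more hardware queues than this in
 * total; only these are driven from here (editorial note: queue counts
 * beyond the rings above are not spelled out in this file).
 */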
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
MODULE_FIRMWARE("radeon/bonaire_me.bin");
MODULE_FIRMWARE("radeon/bonaire_ce.bin");
MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
MODULE_FIRMWARE("radeon/bonaire_mec.bin");

MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
MODULE_FIRMWARE("radeon/hawaii_me.bin");
MODULE_FIRMWARE("radeon/hawaii_ce.bin");
MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
MODULE_FIRMWARE("radeon/hawaii_mec.bin");

MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
MODULE_FIRMWARE("radeon/kaveri_me.bin");
MODULE_FIRMWARE("radeon/kaveri_ce.bin");
MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
MODULE_FIRMWARE("radeon/kaveri_mec.bin");
MODULE_FIRMWARE("radeon/kaveri_mec2.bin");

MODULE_FIRMWARE("radeon/kabini_pfp.bin");
MODULE_FIRMWARE("radeon/kabini_me.bin");
MODULE_FIRMWARE("radeon/kabini_ce.bin");
MODULE_FIRMWARE("radeon/kabini_rlc.bin");
MODULE_FIRMWARE("radeon/kabini_mec.bin");

MODULE_FIRMWARE("radeon/mullins_pfp.bin");
MODULE_FIRMWARE("radeon/mullins_me.bin");
MODULE_FIRMWARE("radeon/mullins_ce.bin");
MODULE_FIRMWARE("radeon/mullins_rlc.bin");
MODULE_FIRMWARE("radeon/mullins_mec.bin");
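/*
 * The GDS base/size and GWS/OA allocation registers are instanced once per
 * VMID; this table maps a VMID index to its register set.
 */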
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
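/*
 * RLC save/restore lists: in each entry the low half is the register's
 * dword offset (byte address >> 2). The high half appears to be an RLC
 * addressing/selector code: 0x0e00 for most registers, with values such as
 * 0x4e00-0xbe00 used for per-instance copies of the same register
 * (interpretation of the selector values inferred from the value pattern).
 */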
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc178 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x8e00 << 16) | (0xcd20 >> 2),
	(0x9e00 << 16) | (0xcd20 >> 2),
	(0xae00 << 16) | (0xcd20 >> 2),
	(0xbe00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc278 >> 2),
	(0x0e00 << 16) | (0xc27c >> 2),
	(0x0e00 << 16) | (0xc280 >> 2),
	(0x0e00 << 16) | (0xc284 >> 2),
	(0x0e00 << 16) | (0xc288 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc29c >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0xc2b0 >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x8e00 << 16) | (0xc900 >> 2),
	(0x9e00 << 16) | (0xc900 >> 2),
	(0xae00 << 16) | (0xc900 >> 2),
	(0xbe00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x8e00 << 16) | (0xc904 >> 2),
	(0x9e00 << 16) | (0xc904 >> 2),
	(0xae00 << 16) | (0xc904 >> 2),
	(0xbe00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x8e00 << 16) | (0xc908 >> 2),
	(0x9e00 << 16) | (0xc908 >> 2),
	(0xae00 << 16) | (0xc908 >> 2),
	(0xbe00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x8e00 << 16) | (0xc90c >> 2),
	(0x9e00 << 16) | (0xc90c >> 2),
	(0xae00 << 16) | (0xc90c >> 2),
	(0xbe00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x8e00 << 16) | (0xc910 >> 2),
	(0x9e00 << 16) | (0xc910 >> 2),
	(0xae00 << 16) | (0xc910 >> 2),
	(0xbe00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0001 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0001 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0001 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0001 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc778 >> 2),
	(0x0400 << 16) | (0xc77c >> 2),
	(0x0400 << 16) | (0xc780 >> 2),
	(0x0400 << 16) | (0xc784 >> 2),
	(0x0400 << 16) | (0xc788 >> 2),
	(0x0400 << 16) | (0xc78c >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0400 << 16) | (0xc7a0 >> 2),
	(0x0400 << 16) | (0xc7a4 >> 2),
	(0x0400 << 16) | (0xc7a8 >> 2),
	(0x0400 << 16) | (0xc7ac >> 2),
	(0x0400 << 16) | (0xc7b0 >> 2),
	(0x0400 << 16) | (0xc7b4 >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x92a8 >> 2),
	(0x0e00 << 16) | (0x92ac >> 2),
	(0x0e00 << 16) | (0x92b4 >> 2),
	(0x0e00 << 16) | (0x92b8 >> 2),
	(0x0e00 << 16) | (0x92bc >> 2),
	(0x0e00 << 16) | (0x92c0 >> 2),
	(0x0e00 << 16) | (0x92c4 >> 2),
	(0x0e00 << 16) | (0x92c8 >> 2),
	(0x0e00 << 16) | (0x92cc >> 2),
	(0x0e00 << 16) | (0x92d0 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x8e00 << 16) | (0x31068 >> 2),
	(0x9e00 << 16) | (0x31068 >> 2),
	(0xae00 << 16) | (0x31068 >> 2),
	(0xbe00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
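/*
 * The Kalindi variant below serves the small APUs (Kabini/Mullins); with a
 * single shader engine it carries fewer per-instance entries than the
 * Spectre (Kaveri) list above.
 */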
static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3e1fc >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
/*
 * Core functions
 */
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;

	if (adev->asic_type == CHIP_KAVERI) {
		/* only Kaveri ships a separate MEC2 firmware image */
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);

out:
	if (err) {
		printk(KERN_ERR
		       "gfx7: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
	}
	return err;
}
static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
}
/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes. Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	const u32 num_secondary_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
	u32 reg_offset, split_equal_to_row_size;
	uint32_t *tile, *macrotile;

	tile = adev->gfx.config.tile_mode_array;
	macrotile = adev->gfx.config.macrotile_mode_array;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
		tile[reg_offset] = 0;
	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
		macrotile[reg_offset] = 0;
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_HAWAII:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P2));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	}
}
/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 * @instance: instance to address (0xffffffff for broadcast)
 *
 * Select which SE, SH combinations to address. Certain
 * registers are instanced per SE or SH. 0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}
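/*
 * Callers such as gfx_v7_0_setup_rb() hold adev->grbm_idx_mutex around a
 * select/access sequence and restore full broadcast (all 0xffffffff) when
 * done, so later register writes hit every instance again.
 */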
/**
 * gfx_v7_0_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * Create a variable length bit mask (CIK), e.g. bit_width = 4
 * yields 0xf.
 * Returns the bitmask.
 */
static u32 gfx_v7_0_create_bitmask(u32 bit_width)
{
	/* 1ULL avoids undefined behaviour when bit_width == 32 */
	return (u32)((1ULL << bit_width) - 1);
}
/**
 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
 *
 * @adev: amdgpu_device pointer
 *
 * Calculates the bitmask of enabled RBs (CIK).
 * Returns the enabled RB bitmask.
 */
static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE);
	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
				       adev->gfx.config.max_sh_per_se);

	/* the registers flag disabled RBs, so invert to get the active set */
	return (~data) & mask;
}
static void
gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= 0x0;
		break;
	case CHIP_HAWAII:
		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
			  SE_YSEL(3);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_KAVERI:
		*rconf |= RB_MAP_PKR0(2);
		*rconf1 |= 0x0;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		*rconf |= 0x0;
		*rconf1 |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}
static void
gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on CI+ */
		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on CI+ */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
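/*
 * A "harvested" part has one or more RBs fused off in production; the helper
 * above rewrites each SE's raster config so the packer/RB routing only
 * references the RBs that survived, per the active-RB bitmap.
 */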
/**
 * gfx_v7_0_setup_rb - setup the RBs on the asic
 *
 * @adev: amdgpu_device pointer
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0, raster_config_1 = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v7_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);

	if (!adev->gfx.config.backend_enable_mask ||
	    adev->gfx.config.num_rbs >= num_rb_pipes) {
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	} else {
		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
/**
 * gmc_v7_0_init_compute_vmid - init the compute vmids
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize compute vmid sh_mem registers
 *
 */
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
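/*
 * VMIDs 8-15 are reserved for compute use. SH_MEM_BASES holds 16-bit
 * private/shared aperture bases that land in the top bits of the 64-bit
 * address, so 0x6000 becomes the 0x6000_0000_0000_0000 base documented in
 * the aperture comment below.
 */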
static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);	/* APE1 disabled */
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
/**
 * gfx_v7_0_gpu_init - setup the 3D engine
 *
 * @adev: amdgpu_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
 */
static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp, sh_mem_cfg;
	int i;

	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));

	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);

	gfx_v7_0_tiling_mode_table_init(adev);

	gfx_v7_0_setup_rb(adev);
	gfx_v7_0_get_cu_info(adev);

	/* set HW defaults for 3D engine */
	WREG32(mmCP_MEQ_THRESHOLDS,
	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, 0);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gmc_v7_0_init_compute_vmid(adev);

	WREG32(mmSX_DEBUG_1, 0x20);

	WREG32(mmTA_CNTL_AUX, 0x00010000);

	tmp = RREG32(mmSPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(mmSPI_CONFIG_CNTL, tmp);

	WREG32(mmSQ_CONFIG, 1);

	WREG32(mmDB_DEBUG, 0);

	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(mmDB_DEBUG2, tmp);

	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(mmDB_DEBUG3, tmp);

	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(mmCB_HW_CONTROL, tmp);

	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));

	WREG32(mmPA_SC_FIFO_SIZE,
	       ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));

	WREG32(mmVGT_NUM_INSTANCES, 1);

	WREG32(mmCP_PERFMON_CNTL, 0);

	WREG32(mmSQ_CONFIG, 0);

	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
	       ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));

	WREG32(mmVGT_CACHE_INVALIDATION,
	       (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
	       (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));

	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
	       (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	udelay(50);
}
1992 * GPU scratch registers helpers function.
1995 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
1997 * @adev: amdgpu_device pointer
1999 * Set up the number and offset of the CP scratch registers.
2000  * NOTE: use of CP scratch registers is a legacy interface and
2001 * is not used by default on newer asics (r6xx+). On newer asics,
2002 * memory buffers are used for fences rather than scratch regs.
2004 static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
2008 adev->gfx.scratch.num_reg = 7;
2009 adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
2010 for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
2011 adev->gfx.scratch.free[i] = true;
2012 adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
2017 * gfx_v7_0_ring_test_ring - basic gfx ring test
2019 * @adev: amdgpu_device pointer
2020 * @ring: amdgpu_ring structure holding ring information
2022 * Allocate a scratch register and write to it using the gfx ring (CIK).
2023 * Provides a basic gfx ring test to verify that the ring is working.
2024  * Used by gfx_v7_0_cp_gfx_resume().
2025 * Returns 0 on success, error on failure.
2027 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2029 struct amdgpu_device *adev = ring->adev;
2035 r = amdgpu_gfx_scratch_get(adev, &scratch);
2037 DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
2040 WREG32(scratch, 0xCAFEDEAD);
2041 r = amdgpu_ring_alloc(ring, 3);
2043 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
2044 amdgpu_gfx_scratch_free(adev, scratch);
2047 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2048 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2049 amdgpu_ring_write(ring, 0xDEADBEEF);
2050 amdgpu_ring_commit(ring);
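/*
 * Illustrative note (an assumption about the PACKET3 format, not text
 * from the original source): the three dwords committed above form a
 * single SET_UCONFIG_REG packet:
 *
 *   dw0: PACKET3 header, opcode SET_UCONFIG_REG, count field = 1
 *   dw1: register offset relative to PACKET3_SET_UCONFIG_REG_START
 *   dw2: the value to write (0xDEADBEEF)
 *
 * The loop below then polls the scratch register until the CP has
 * consumed the packet and the written value becomes visible.
 */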
2052 for (i = 0; i < adev->usec_timeout; i++) {
2053 tmp = RREG32(scratch);
2054 if (tmp == 0xDEADBEEF)
2058 if (i < adev->usec_timeout) {
2059 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2061 DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2062 ring->idx, scratch, tmp);
2065 amdgpu_gfx_scratch_free(adev, scratch);
2070  * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
2072  * @ring: amdgpu_ring structure holding ring information
2075 * Emits an hdp flush on the cp.
2077 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2080 int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2082 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) {
2085 ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2088 ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2094 ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2097 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2098 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2099 WAIT_REG_MEM_FUNCTION(3) | /* == */
2100 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
2101 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2102 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2103 amdgpu_ring_write(ring, ref_and_mask);
2104 amdgpu_ring_write(ring, ref_and_mask);
2105 amdgpu_ring_write(ring, 0x20); /* poll interval */
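/*
 * Illustrative note (assumption): with WAIT_REG_MEM_OPERATION(1) the
 * CP writes ref_and_mask to GPU_HDP_FLUSH_REQ, then polls
 * GPU_HDP_FLUSH_DONE every 0x20 clocks until
 * (DONE & ref_and_mask) == ref_and_mask, i.e. until the HDP flush for
 * this engine's bit has completed.
 */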
2109 * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
2111  * @ring: amdgpu_ring structure holding ring information
2114 * Emits an hdp invalidate on the cp.
2116 static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
2118 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2119 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2120 WRITE_DATA_DST_SEL(0) |
2122 amdgpu_ring_write(ring, mmHDP_DEBUG0);
2123 amdgpu_ring_write(ring, 0);
2124 amdgpu_ring_write(ring, 1);
2128 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2130  * @ring: amdgpu_ring structure holding ring information
2131  * @addr/@seq/@flags: fence GPU address, sequence number and flags
2133  * Emits a fence sequence number on the gfx ring and flushes GPU caches.
2136 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2137 u64 seq, unsigned flags)
2139 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2140 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2141 /* Workaround for cache flush problems. First send a dummy EOP
2142 * event down the pipe with seq one below.
2144 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2145 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2147 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2149 amdgpu_ring_write(ring, addr & 0xfffffffc);
2150 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2151 DATA_SEL(1) | INT_SEL(0));
2152 amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2153 amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2155 /* Then send the real EOP event down the pipe. */
2156 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2157 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2159 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2161 amdgpu_ring_write(ring, addr & 0xfffffffc);
2162 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2163 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2164 amdgpu_ring_write(ring, lower_32_bits(seq));
2165 amdgpu_ring_write(ring, upper_32_bits(seq));
2169 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
2171  * @ring: amdgpu_ring structure holding ring information
2172  * @addr/@seq/@flags: fence GPU address, sequence number and flags
2174  * Emits a fence sequence number on the compute ring and flushes GPU caches.
2177 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2181 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2182 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2184 /* RELEASE_MEM - flush caches, send int */
2185 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2186 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2188 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2190 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2191 amdgpu_ring_write(ring, addr & 0xfffffffc);
2192 amdgpu_ring_write(ring, upper_32_bits(addr));
2193 amdgpu_ring_write(ring, lower_32_bits(seq));
2194 amdgpu_ring_write(ring, upper_32_bits(seq));
2201 * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
2203 * @ring: amdgpu_ring structure holding ring information
2204 * @ib: amdgpu indirect buffer object
2206  * Emits a DE (drawing engine) or CE (constant engine) IB
2207  * on the gfx ring. IBs are usually generated by userspace
2208  * acceleration drivers and submitted to the kernel for
2209  * scheduling on the ring. This function schedules the IB
2210  * on the gfx ring for execution by the GPU.
2212 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2213 struct amdgpu_ib *ib,
2214 unsigned vm_id, bool ctx_switch)
2216 u32 header, control = 0;
2218 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2220 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2221 amdgpu_ring_write(ring, 0);
2224 if (ib->flags & AMDGPU_IB_FLAG_CE)
2225 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2227 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2229 control |= ib->length_dw | (vm_id << 24);
2231 amdgpu_ring_write(ring, header);
2232 amdgpu_ring_write(ring,
2236 (ib->gpu_addr & 0xFFFFFFFC));
2237 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2238 amdgpu_ring_write(ring, control);
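/*
 * Illustrative note (assumption): the four dwords above form one
 * INDIRECT_BUFFER / INDIRECT_BUFFER_CONST packet:
 *
 *   dw0: PACKET3 header (DE or CE variant), count field = 2
 *   dw1: IB GPU address bits 31:2 (the IB must be dword aligned)
 *   dw2: IB GPU address bits 47:32
 *   dw3: control word: IB length in dwords, VMID in bits 31:24
 */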
2241 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2242 struct amdgpu_ib *ib,
2243 unsigned vm_id, bool ctx_switch)
2245 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
2247 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2248 amdgpu_ring_write(ring,
2252 (ib->gpu_addr & 0xFFFFFFFC));
2253 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2254 amdgpu_ring_write(ring, control);
2257 static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2261 dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
2262 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2263 /* set load_global_config & load_global_uconfig */
2265 /* set load_cs_sh_regs */
2267 /* set load_per_context_state & load_gfx_sh_regs */
2271 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2272 amdgpu_ring_write(ring, dw2);
2273 amdgpu_ring_write(ring, 0);
2277 * gfx_v7_0_ring_test_ib - basic ring IB test
2279 * @ring: amdgpu_ring structure holding ring information
2281 * Allocate an IB and execute it on the gfx ring (CIK).
2282 * Provides a basic gfx ring test to verify that IBs are working.
2283 * Returns 0 on success, error on failure.
2285 static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2287 struct amdgpu_device *adev = ring->adev;
2288 struct amdgpu_ib ib;
2289 struct fence *f = NULL;
2294 r = amdgpu_gfx_scratch_get(adev, &scratch);
2296 DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
2299 WREG32(scratch, 0xCAFEDEAD);
2300 memset(&ib, 0, sizeof(ib));
2301 r = amdgpu_ib_get(adev, NULL, 256, &ib);
2303 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
2306 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2307 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2308 ib.ptr[2] = 0xDEADBEEF;
2311 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
2315 r = fence_wait_timeout(f, false, timeout);
2317 DRM_ERROR("amdgpu: IB test timed out\n");
2321 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
2324 tmp = RREG32(scratch);
2325 if (tmp == 0xDEADBEEF) {
2326 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
2329 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
2335 amdgpu_ib_free(adev, &ib, NULL);
2338 amdgpu_gfx_scratch_free(adev, scratch);
2344  * On CIK, gfx and compute now have independent command processors.
2347 * Gfx consists of a single ring and can process both gfx jobs and
2348 * compute jobs. The gfx CP consists of three microengines (ME):
2349 * PFP - Pre-Fetch Parser
2351 * CE - Constant Engine
2352 * The PFP and ME make up what is considered the Drawing Engine (DE).
2353  * The CE is an asynchronous engine used for updating buffer descriptors
2354 * used by the DE so that they can be loaded into cache in parallel
2355 * while the DE is processing state update packets.
2358 * The compute CP consists of two microengines (ME):
2359 * MEC1 - Compute MicroEngine 1
2360 * MEC2 - Compute MicroEngine 2
2361 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2362 * The queues are exposed to userspace and are programmed directly
2363 * by the compute runtime.
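 *
 * As an illustrative sketch (an assumption mirroring the pipe setup
 * code below, not text from the original source), a linear queue index
 * decomposes as:
 *
 *   mec   = idx / (pipes_per_mec * queues_per_pipe)
 *   pipe  = (idx / queues_per_pipe) % pipes_per_mec
 *   queue = idx % queues_per_pipe
 *
 * with pipes_per_mec = 4 and queues_per_pipe = 8 on CIK.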
2366 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2368 * @adev: amdgpu_device pointer
2369 * @enable: enable or disable the MEs
2371 * Halts or unhalts the gfx MEs.
2373 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2378 WREG32(mmCP_ME_CNTL, 0);
2380 WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
2381 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2382 adev->gfx.gfx_ring[i].ready = false;
2388 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2390 * @adev: amdgpu_device pointer
2392 * Loads the gfx PFP, ME, and CE ucode.
2393 * Returns 0 for success, -EINVAL if the ucode is not available.
2395 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2397 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2398 const struct gfx_firmware_header_v1_0 *ce_hdr;
2399 const struct gfx_firmware_header_v1_0 *me_hdr;
2400 const __le32 *fw_data;
2401 unsigned i, fw_size;
2403 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2406 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2407 ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2408 me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2410 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2411 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2412 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2413 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2414 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2415 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2416 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2417 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2418 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2420 gfx_v7_0_cp_gfx_enable(adev, false);
2423 fw_data = (const __le32 *)
2424 (adev->gfx.pfp_fw->data +
2425 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2426 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2427 WREG32(mmCP_PFP_UCODE_ADDR, 0);
2428 for (i = 0; i < fw_size; i++)
2429 WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2430 WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2433 fw_data = (const __le32 *)
2434 (adev->gfx.ce_fw->data +
2435 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2436 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2437 WREG32(mmCP_CE_UCODE_ADDR, 0);
2438 for (i = 0; i < fw_size; i++)
2439 WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2440 WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2443 fw_data = (const __le32 *)
2444 (adev->gfx.me_fw->data +
2445 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2446 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2447 WREG32(mmCP_ME_RAM_WADDR, 0);
2448 for (i = 0; i < fw_size; i++)
2449 WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2450 WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
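/*
 * Illustrative note (assumption): each of the three loads above follows
 * the same protocol: reset the auto-incrementing ucode address to 0,
 * stream the little-endian firmware words through the DATA register,
 * then leave the firmware version in the address register as a marker
 * that the load completed.
 */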
2456 * gfx_v7_0_cp_gfx_start - start the gfx ring
2458 * @adev: amdgpu_device pointer
2460 * Enables the ring and loads the clear state context and other
2461 * packets required to init the ring.
2462 * Returns 0 for success, error for failure.
2464 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2466 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2467 const struct cs_section_def *sect = NULL;
2468 const struct cs_extent_def *ext = NULL;
2472 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2473 WREG32(mmCP_ENDIAN_SWAP, 0);
2474 WREG32(mmCP_DEVICE_ID, 1);
2476 gfx_v7_0_cp_gfx_enable(adev, true);
2478 r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2480 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2484 /* init the CE partitions. CE only used for gfx on CIK */
2485 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2486 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2487 amdgpu_ring_write(ring, 0x8000);
2488 amdgpu_ring_write(ring, 0x8000);
2490 /* clear state buffer */
2491 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2492 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2494 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2495 amdgpu_ring_write(ring, 0x80000000);
2496 amdgpu_ring_write(ring, 0x80000000);
2498 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2499 for (ext = sect->section; ext->extent != NULL; ++ext) {
2500 if (sect->id == SECT_CONTEXT) {
2501 amdgpu_ring_write(ring,
2502 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2503 amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2504 for (i = 0; i < ext->reg_count; i++)
2505 amdgpu_ring_write(ring, ext->extent[i]);
2510 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2511 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2512 switch (adev->asic_type) {
2514 amdgpu_ring_write(ring, 0x16000012);
2515 amdgpu_ring_write(ring, 0x00000000);
2518 amdgpu_ring_write(ring, 0x00000000); /* XXX */
2519 amdgpu_ring_write(ring, 0x00000000);
2523 amdgpu_ring_write(ring, 0x00000000); /* XXX */
2524 amdgpu_ring_write(ring, 0x00000000);
2527 amdgpu_ring_write(ring, 0x3a00161a);
2528 amdgpu_ring_write(ring, 0x0000002e);
2531 amdgpu_ring_write(ring, 0x00000000);
2532 amdgpu_ring_write(ring, 0x00000000);
2536 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2537 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2539 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2540 amdgpu_ring_write(ring, 0);
2542 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2543 amdgpu_ring_write(ring, 0x00000316);
2544 amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2545 amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2547 amdgpu_ring_commit(ring);
2553 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2555 * @adev: amdgpu_device pointer
2557 * Program the location and size of the gfx ring buffer
2558 * and test it to make sure it's working.
2559 * Returns 0 for success, error for failure.
2561 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2563 struct amdgpu_ring *ring;
2566 u64 rb_addr, rptr_addr;
2569 WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2570 if (adev->asic_type != CHIP_HAWAII)
2571 WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2573 /* Set the write pointer delay */
2574 WREG32(mmCP_RB_WPTR_DELAY, 0);
2576 /* set the RB to use vmid 0 */
2577 WREG32(mmCP_RB_VMID, 0);
2579 WREG32(mmSCRATCH_ADDR, 0);
2581 /* ring 0 - compute and gfx */
2582 /* Set ring buffer size */
2583 ring = &adev->gfx.gfx_ring[0];
2584 rb_bufsz = order_base_2(ring->ring_size / 8);
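/*
 * Example (illustrative): a 1 MiB ring gives
 * rb_bufsz = order_base_2(0x100000 / 8) = order_base_2(0x20000) = 17.
 */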
2585 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2587 tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2589 WREG32(mmCP_RB0_CNTL, tmp);
2591 /* Initialize the ring buffer's read and write pointers */
2592 WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2594 WREG32(mmCP_RB0_WPTR, ring->wptr);
2596 /* set the wb address whether it's enabled or not */
2597 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2598 WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2599 WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2601 /* scratch register shadowing is no longer supported */
2602 WREG32(mmSCRATCH_UMSK, 0);
2605 WREG32(mmCP_RB0_CNTL, tmp);
2607 rb_addr = ring->gpu_addr >> 8;
2608 WREG32(mmCP_RB0_BASE, rb_addr);
2609 WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2611 /* start the ring */
2612 gfx_v7_0_cp_gfx_start(adev);
2614 r = amdgpu_ring_test_ring(ring);
2616 ring->ready = false;
2623 static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2625 return ring->adev->wb.wb[ring->rptr_offs];
2628 static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2630 struct amdgpu_device *adev = ring->adev;
2632 return RREG32(mmCP_RB0_WPTR);
2635 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2637 struct amdgpu_device *adev = ring->adev;
2639 WREG32(mmCP_RB0_WPTR, ring->wptr);
2640 (void)RREG32(mmCP_RB0_WPTR);
2643 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2645 /* XXX check if swapping is necessary on BE */
2646 return ring->adev->wb.wb[ring->wptr_offs];
2649 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2651 struct amdgpu_device *adev = ring->adev;
2653 /* XXX check if swapping is necessary on BE */
2654 adev->wb.wb[ring->wptr_offs] = ring->wptr;
2655 WDOORBELL32(ring->doorbell_index, ring->wptr);
2659 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2661 * @adev: amdgpu_device pointer
2662 * @enable: enable or disable the MEs
2664 * Halts or unhalts the compute MEs.
2666 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2671 WREG32(mmCP_MEC_CNTL, 0);
2673 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2674 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2675 adev->gfx.compute_ring[i].ready = false;
2681 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2683 * @adev: amdgpu_device pointer
2685 * Loads the compute MEC1&2 ucode.
2686 * Returns 0 for success, -EINVAL if the ucode is not available.
2688 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2690 const struct gfx_firmware_header_v1_0 *mec_hdr;
2691 const __le32 *fw_data;
2692 unsigned i, fw_size;
2694 if (!adev->gfx.mec_fw)
2697 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2698 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2699 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2700 adev->gfx.mec_feature_version = le32_to_cpu(
2701 mec_hdr->ucode_feature_version);
2703 gfx_v7_0_cp_compute_enable(adev, false);
2706 fw_data = (const __le32 *)
2707 (adev->gfx.mec_fw->data +
2708 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2709 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2710 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2711 for (i = 0; i < fw_size; i++)
2712 WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2713 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2715 if (adev->asic_type == CHIP_KAVERI) {
2716 const struct gfx_firmware_header_v1_0 *mec2_hdr;
2718 if (!adev->gfx.mec2_fw)
2721 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2722 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2723 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2724 adev->gfx.mec2_feature_version = le32_to_cpu(
2725 mec2_hdr->ucode_feature_version);
2728 fw_data = (const __le32 *)
2729 (adev->gfx.mec2_fw->data +
2730 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2731 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2732 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2733 for (i = 0; i < fw_size; i++)
2734 WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2735 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2742 * gfx_v7_0_cp_compute_fini - stop the compute queues
2744 * @adev: amdgpu_device pointer
2746  * Stop the compute queues and tear down the driver queue info.
2749 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2753 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2754 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2756 if (ring->mqd_obj) {
2757 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2758 if (unlikely(r != 0))
2759 dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
2761 amdgpu_bo_unpin(ring->mqd_obj);
2762 amdgpu_bo_unreserve(ring->mqd_obj);
2764 amdgpu_bo_unref(&ring->mqd_obj);
2765 ring->mqd_obj = NULL;
2770 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2774 if (adev->gfx.mec.hpd_eop_obj) {
2775 r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2776 if (unlikely(r != 0))
2777 dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
2778 amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
2779 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2781 amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
2782 adev->gfx.mec.hpd_eop_obj = NULL;
2786 #define MEC_HPD_SIZE 2048
2788 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2794 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
2795 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
2796  * Nonetheless, we assign only 1 pipe because all other pipes will be handled by KFD.
2799 adev->gfx.mec.num_mec = 1;
2800 adev->gfx.mec.num_pipe = 1;
2801 adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;
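/* i.e. 1 * 1 * 8 = 8 queues, one per compute ring */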
2803 if (adev->gfx.mec.hpd_eop_obj == NULL) {
2804 r = amdgpu_bo_create(adev,
2805 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
2807 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2808 &adev->gfx.mec.hpd_eop_obj);
2810 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
2815 r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
2816 if (unlikely(r != 0)) {
2817 gfx_v7_0_mec_fini(adev);
2820 r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
2821 &adev->gfx.mec.hpd_eop_gpu_addr);
2823 dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
2824 gfx_v7_0_mec_fini(adev);
2827 r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
2829 dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
2830 gfx_v7_0_mec_fini(adev);
2834 /* clear memory. Not sure if this is required or not */
2835 memset(hpd, 0, adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2);
2837 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2838 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2843 struct hqd_registers
2845 u32 cp_mqd_base_addr;
2846 u32 cp_mqd_base_addr_hi;
2849 u32 cp_hqd_persistent_state;
2850 u32 cp_hqd_pipe_priority;
2851 u32 cp_hqd_queue_priority;
2854 u32 cp_hqd_pq_base_hi;
2856 u32 cp_hqd_pq_rptr_report_addr;
2857 u32 cp_hqd_pq_rptr_report_addr_hi;
2858 u32 cp_hqd_pq_wptr_poll_addr;
2859 u32 cp_hqd_pq_wptr_poll_addr_hi;
2860 u32 cp_hqd_pq_doorbell_control;
2862 u32 cp_hqd_pq_control;
2863 u32 cp_hqd_ib_base_addr;
2864 u32 cp_hqd_ib_base_addr_hi;
2866 u32 cp_hqd_ib_control;
2867 u32 cp_hqd_iq_timer;
2869 u32 cp_hqd_dequeue_request;
2870 u32 cp_hqd_dma_offload;
2871 u32 cp_hqd_sema_cmd;
2872 u32 cp_hqd_msg_type;
2873 u32 cp_hqd_atomic0_preop_lo;
2874 u32 cp_hqd_atomic0_preop_hi;
2875 u32 cp_hqd_atomic1_preop_lo;
2876 u32 cp_hqd_atomic1_preop_hi;
2877 u32 cp_hqd_hq_scheduler0;
2878 u32 cp_hqd_hq_scheduler1;
2885 u32 dispatch_initiator;
2889 u32 pipeline_stat_enable;
2890 u32 perf_counter_enable;
2896 u32 resource_limits;
2897 u32 static_thread_mgmt01[2];
2899 u32 static_thread_mgmt23[2];
2901 u32 thread_trace_enable;
2904 u32 vgtcs_invoke_count[2];
2905 struct hqd_registers queue_state;
2907 u32 interrupt_queue[64];
2911 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
2913 * @adev: amdgpu_device pointer
2915 * Program the compute queues and test them to make sure they
2917 * Returns 0 for success, error for failure.
2919 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
2923 bool use_doorbell = true;
2929 struct bonaire_mqd *mqd;
2930 struct amdgpu_ring *ring;
2932 /* fix up chicken bits */
2933 tmp = RREG32(mmCP_CPF_DEBUG);
2935 WREG32(mmCP_CPF_DEBUG, tmp);
2937 /* init the pipes */
2938 mutex_lock(&adev->srbm_mutex);
2939 for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
2940 int me = (i < 4) ? 1 : 2;
2941 int pipe = (i < 4) ? i : (i - 4);
2943 eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
2945 cik_srbm_select(adev, me, pipe, 0, 0);
2947 /* write the EOP addr */
2948 WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2949 WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2951 /* set the VMID assigned */
2952 WREG32(mmCP_HPD_EOP_VMID, 0);
2954 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2955 tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2956 tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2957 tmp |= order_base_2(MEC_HPD_SIZE / 8);
2958 WREG32(mmCP_HPD_EOP_CONTROL, tmp);
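/*
 * Example (illustrative): MEC_HPD_SIZE = 2048 bytes gives
 * order_base_2(2048 / 8) = 8, and 2^(8+1) = 512 dwords = 2048 bytes,
 * so the programmed EOP size matches the per-pipe allocation.
 */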
2960 cik_srbm_select(adev, 0, 0, 0, 0);
2961 mutex_unlock(&adev->srbm_mutex);
2963 /* init the compute queues */
2964 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2965 ring = &adev->gfx.compute_ring[i];
2967 if (ring->mqd_obj == NULL) {
2968 r = amdgpu_bo_create(adev,
2969 sizeof(struct bonaire_mqd),
2971 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
2974 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
2979 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2980 if (unlikely(r != 0)) {
2981 gfx_v7_0_cp_compute_fini(adev);
2984 r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
2987 dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
2988 gfx_v7_0_cp_compute_fini(adev);
2991 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
2993 dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
2994 gfx_v7_0_cp_compute_fini(adev);
2998 /* init the mqd struct */
2999 memset(buf, 0, sizeof(struct bonaire_mqd));
3001 mqd = (struct bonaire_mqd *)buf;
3002 mqd->header = 0xC0310800;
3003 mqd->static_thread_mgmt01[0] = 0xffffffff;
3004 mqd->static_thread_mgmt01[1] = 0xffffffff;
3005 mqd->static_thread_mgmt23[0] = 0xffffffff;
3006 mqd->static_thread_mgmt23[1] = 0xffffffff;
3008 mutex_lock(&adev->srbm_mutex);
3009 cik_srbm_select(adev, ring->me,
3013 /* disable wptr polling */
3014 tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3015 tmp &= ~CP_PQ_WPTR_POLL_CNTL__EN_MASK;
3016 WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3018 /* enable doorbell? */
3019 mqd->queue_state.cp_hqd_pq_doorbell_control =
3020 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3022 mqd->queue_state.cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3024 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3025 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3026 mqd->queue_state.cp_hqd_pq_doorbell_control);
3028 /* disable the queue if it's active */
3029 mqd->queue_state.cp_hqd_dequeue_request = 0;
3030 mqd->queue_state.cp_hqd_pq_rptr = 0;
3031 mqd->queue_state.cp_hqd_pq_wptr= 0;
3032 if (RREG32(mmCP_HQD_ACTIVE) & 1) {
3033 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
3034 for (j = 0; j < adev->usec_timeout; j++) {
3035 if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
3039 WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
3040 WREG32(mmCP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
3041 WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3044 /* set the pointer to the MQD */
3045 mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
3046 mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
3047 WREG32(mmCP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
3048 WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
3049 /* set MQD vmid to 0 */
3050 mqd->queue_state.cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
3051 mqd->queue_state.cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
3052 WREG32(mmCP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
3054 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3055 hqd_gpu_addr = ring->gpu_addr >> 8;
3056 mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
3057 mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3058 WREG32(mmCP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
3059 WREG32(mmCP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
3061 /* set up the HQD, this is similar to CP_RB0_CNTL */
3062 mqd->queue_state.cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
3063 mqd->queue_state.cp_hqd_pq_control &=
3064 ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
3065 CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
3067 mqd->queue_state.cp_hqd_pq_control |=
3068 order_base_2(ring->ring_size / 8);
3069 mqd->queue_state.cp_hqd_pq_control |=
3070 (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
3072 mqd->queue_state.cp_hqd_pq_control |=
3073 2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
3075 mqd->queue_state.cp_hqd_pq_control &=
3076 ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
3077 CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
3078 CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
3079 mqd->queue_state.cp_hqd_pq_control |=
3080 CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
3081 CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
3082 WREG32(mmCP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
3084 /* only used if CP_PQ_WPTR_POLL_CNTL.EN == 1 */
3085 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3086 mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
3087 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3088 WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
3089 WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3090 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
3092 /* set the wb address whether it's enabled or not */
3093 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3094 mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
3095 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
3096 upper_32_bits(wb_gpu_addr) & 0xffff;
3097 WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3098 mqd->queue_state.cp_hqd_pq_rptr_report_addr);
3099 WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3100 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
3102 /* enable the doorbell if requested */
3104 mqd->queue_state.cp_hqd_pq_doorbell_control =
3105 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3106 mqd->queue_state.cp_hqd_pq_doorbell_control &=
3107 ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
3108 mqd->queue_state.cp_hqd_pq_doorbell_control |=
3109 (ring->doorbell_index <<
3110 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
3111 mqd->queue_state.cp_hqd_pq_doorbell_control |=
3112 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
3113 mqd->queue_state.cp_hqd_pq_doorbell_control &=
3114 ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
3115 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
3118 mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
3120 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3121 mqd->queue_state.cp_hqd_pq_doorbell_control);
3123 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3125 mqd->queue_state.cp_hqd_pq_wptr = ring->wptr;
3126 WREG32(mmCP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
3127 mqd->queue_state.cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3129 /* set the vmid for the queue */
3130 mqd->queue_state.cp_hqd_vmid = 0;
3131 WREG32(mmCP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
3133 /* activate the queue */
3134 mqd->queue_state.cp_hqd_active = 1;
3135 WREG32(mmCP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
3137 cik_srbm_select(adev, 0, 0, 0, 0);
3138 mutex_unlock(&adev->srbm_mutex);
3140 amdgpu_bo_kunmap(ring->mqd_obj);
3141 amdgpu_bo_unreserve(ring->mqd_obj);
3146 gfx_v7_0_cp_compute_enable(adev, true);
3148 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3149 ring = &adev->gfx.compute_ring[i];
3151 r = amdgpu_ring_test_ring(ring);
3153 ring->ready = false;
3159 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3161 gfx_v7_0_cp_gfx_enable(adev, enable);
3162 gfx_v7_0_cp_compute_enable(adev, enable);
3165 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3169 r = gfx_v7_0_cp_gfx_load_microcode(adev);
3172 r = gfx_v7_0_cp_compute_load_microcode(adev);
3179 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3182 u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3185 tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3186 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3188 tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3189 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3190 WREG32(mmCP_INT_CNTL_RING0, tmp);
3193 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3197 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3199 r = gfx_v7_0_cp_load_microcode(adev);
3203 r = gfx_v7_0_cp_gfx_resume(adev);
3206 r = gfx_v7_0_cp_compute_resume(adev);
3210 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3216  * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
3218  * @ring: the ring to emit the commands to
3220  * Sync the command pipeline with the PFP, i.e. wait for all in-flight work to finish.
3223 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3225 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3226 uint32_t seq = ring->fence_drv.sync_seq;
3227 uint64_t addr = ring->fence_drv.gpu_addr;
3229 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3230 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3231 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3232 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
3233 amdgpu_ring_write(ring, addr & 0xfffffffc);
3234 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3235 amdgpu_ring_write(ring, seq);
3236 amdgpu_ring_write(ring, 0xffffffff);
3237 amdgpu_ring_write(ring, 4); /* poll interval */
3240 /* sync CE with ME to prevent the CE from fetching the CEIB before the context switch is done */
3241 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3242 amdgpu_ring_write(ring, 0);
3243 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3244 amdgpu_ring_write(ring, 0);
3250  * VMID 0 is the physical GPU address space as used by the kernel.
3251 * VMIDs 1-15 are used for userspace clients and are handled
3252 * by the amdgpu vm/hsa code.
3255 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3257  * @ring: the ring to emit the flush on
3259 * Update the page table base and flush the VM TLB
3260 * using the CP (CIK).
3262 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3263 unsigned vm_id, uint64_t pd_addr)
3265 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3267 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3268 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3269 WRITE_DATA_DST_SEL(0)));
3271 amdgpu_ring_write(ring,
3272 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
3274 amdgpu_ring_write(ring,
3275 (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
3277 amdgpu_ring_write(ring, 0);
3278 amdgpu_ring_write(ring, pd_addr >> 12);
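/* pd_addr is programmed in units of 4 KiB pages, hence the >> 12 */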
3280 /* bits 0-15 are the VM contexts0-15 */
3281 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3282 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3283 WRITE_DATA_DST_SEL(0)));
3284 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3285 amdgpu_ring_write(ring, 0);
3286 amdgpu_ring_write(ring, 1 << vm_id);
3288 /* wait for the invalidate to complete */
3289 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3290 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3291 WAIT_REG_MEM_FUNCTION(0) | /* always */
3292 WAIT_REG_MEM_ENGINE(0))); /* me */
3293 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3294 amdgpu_ring_write(ring, 0);
3295 amdgpu_ring_write(ring, 0); /* ref */
3296 amdgpu_ring_write(ring, 0); /* mask */
3297 amdgpu_ring_write(ring, 0x20); /* poll interval */
3299 /* compute doesn't have PFP */
3301 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3302 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3303 amdgpu_ring_write(ring, 0x0);
3305 /* sync CE with ME to prevent the CE from fetching the CEIB before the context switch is done */
3306 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3307 amdgpu_ring_write(ring, 0);
3308 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3309 amdgpu_ring_write(ring, 0);
3315 * The RLC is a multi-purpose microengine that handles a
3316 * variety of functions.
3318 static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
3322 /* save restore block */
3323 if (adev->gfx.rlc.save_restore_obj) {
3324 r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3325 if (unlikely(r != 0))
3326 dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
3327 amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
3328 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3330 amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
3331 adev->gfx.rlc.save_restore_obj = NULL;
3334 /* clear state block */
3335 if (adev->gfx.rlc.clear_state_obj) {
3336 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3337 if (unlikely(r != 0))
3338 dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
3339 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
3340 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3342 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
3343 adev->gfx.rlc.clear_state_obj = NULL;
3346 /* cp table (jump table) block */
3347 if (adev->gfx.rlc.cp_table_obj) {
3348 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3349 if (unlikely(r != 0))
3350 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3351 amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
3352 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3354 amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
3355 adev->gfx.rlc.cp_table_obj = NULL;
3359 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3362 volatile u32 *dst_ptr;
3364 const struct cs_section_def *cs_data;
3367 /* allocate rlc buffers */
3368 if (adev->flags & AMD_IS_APU) {
3369 if (adev->asic_type == CHIP_KAVERI) {
3370 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3371 adev->gfx.rlc.reg_list_size =
3372 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3374 adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3375 adev->gfx.rlc.reg_list_size =
3376 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3379 adev->gfx.rlc.cs_data = ci_cs_data;
3380 adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3381 adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3383 src_ptr = adev->gfx.rlc.reg_list;
3384 dws = adev->gfx.rlc.reg_list_size;
3385 dws += (5 * 16) + 48 + 48 + 64;
3387 cs_data = adev->gfx.rlc.cs_data;
3390 /* save restore block */
3391 if (adev->gfx.rlc.save_restore_obj == NULL) {
3392 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3393 AMDGPU_GEM_DOMAIN_VRAM,
3394 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3396 &adev->gfx.rlc.save_restore_obj);
3398 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3403 r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
3404 if (unlikely(r != 0)) {
3405 gfx_v7_0_rlc_fini(adev);
3408 r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
3409 &adev->gfx.rlc.save_restore_gpu_addr);
3411 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3412 dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
3413 gfx_v7_0_rlc_fini(adev);
3417 r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
3419 dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
3420 gfx_v7_0_rlc_fini(adev);
3423 /* write the sr buffer */
3424 dst_ptr = adev->gfx.rlc.sr_ptr;
3425 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3426 dst_ptr[i] = cpu_to_le32(src_ptr[i]);
3427 amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
3428 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
3432 /* clear state block */
3433 adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
3435 if (adev->gfx.rlc.clear_state_obj == NULL) {
3436 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3437 AMDGPU_GEM_DOMAIN_VRAM,
3438 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3440 &adev->gfx.rlc.clear_state_obj);
3442 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3443 gfx_v7_0_rlc_fini(adev);
3447 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
3448 if (unlikely(r != 0)) {
3449 gfx_v7_0_rlc_fini(adev);
3452 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
3453 &adev->gfx.rlc.clear_state_gpu_addr);
3455 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3456 dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
3457 gfx_v7_0_rlc_fini(adev);
3461 r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
3463 dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
3464 gfx_v7_0_rlc_fini(adev);
3467 /* set up the cs buffer */
3468 dst_ptr = adev->gfx.rlc.cs_ptr;
3469 gfx_v7_0_get_csb_buffer(adev, dst_ptr);
3470 amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
3471 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
3474 if (adev->gfx.rlc.cp_table_size) {
3475 if (adev->gfx.rlc.cp_table_obj == NULL) {
3476 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3477 AMDGPU_GEM_DOMAIN_VRAM,
3478 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3480 &adev->gfx.rlc.cp_table_obj);
3482 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3483 gfx_v7_0_rlc_fini(adev);
3488 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
3489 if (unlikely(r != 0)) {
3490 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
3491 gfx_v7_0_rlc_fini(adev);
3494 r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
3495 &adev->gfx.rlc.cp_table_gpu_addr);
3497 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3498 dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
3499 gfx_v7_0_rlc_fini(adev);
3502 r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
3504 dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
3505 gfx_v7_0_rlc_fini(adev);
3509 gfx_v7_0_init_cp_pg_table(adev);
3511 amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
3512 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
3519 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3523 tmp = RREG32(mmRLC_LB_CNTL);
3525 tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3527 tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3528 WREG32(mmRLC_LB_CNTL, tmp);
3531 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3536 mutex_lock(&adev->grbm_idx_mutex);
3537 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3538 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3539 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3540 for (k = 0; k < adev->usec_timeout; k++) {
3541 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3547 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3548 mutex_unlock(&adev->grbm_idx_mutex);
3550 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3551 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3552 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3553 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3554 for (k = 0; k < adev->usec_timeout; k++) {
3555 if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3561 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3565 tmp = RREG32(mmRLC_CNTL);
3567 WREG32(mmRLC_CNTL, rlc);
3570 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3574 orig = data = RREG32(mmRLC_CNTL);
3576 if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3579 data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3580 WREG32(mmRLC_CNTL, data);
3582 for (i = 0; i < adev->usec_timeout; i++) {
3583 if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3588 gfx_v7_0_wait_for_rlc_serdes(adev);
3594 static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3598 tmp = 0x1 | (1 << 1);
3599 WREG32(mmRLC_GPR_REG2, tmp);
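/*
 * Illustrative note (assumption): bit 0 of RLC_GPR_REG2 acts as the
 * request flag and bit 1 as the message (1 = enter safe mode, 0 = exit,
 * mirroring gfx_v7_0_exit_rlc_safe_mode() below).  The loops that
 * follow wait for the RLC to report gfx power/clock status and then
 * clear the request bit as an acknowledgement.
 */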
3601 mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3602 RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3603 for (i = 0; i < adev->usec_timeout; i++) {
3604 if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3609 for (i = 0; i < adev->usec_timeout; i++) {
3610 if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3616 static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3620 tmp = 0x1 | (0 << 1);
3621 WREG32(mmRLC_GPR_REG2, tmp);
3625 * gfx_v7_0_rlc_stop - stop the RLC ME
3627 * @adev: amdgpu_device pointer
3629 * Halt the RLC ME (MicroEngine) (CIK).
3631 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3633 WREG32(mmRLC_CNTL, 0);
3635 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3637 gfx_v7_0_wait_for_rlc_serdes(adev);
3641 * gfx_v7_0_rlc_start - start the RLC ME
3643 * @adev: amdgpu_device pointer
3645 * Unhalt the RLC ME (MicroEngine) (CIK).
3647 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3649 WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3651 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3656 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3658 u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3660 tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3661 WREG32(mmGRBM_SOFT_RESET, tmp);
3663 tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3664 WREG32(mmGRBM_SOFT_RESET, tmp);
3669 * gfx_v7_0_rlc_resume - setup the RLC hw
3671 * @adev: amdgpu_device pointer
3673 * Initialize the RLC registers, load the ucode,
3674 * and start the RLC (CIK).
3675 * Returns 0 for success, -EINVAL if the ucode is not available.
3677 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3679 const struct rlc_firmware_header_v1_0 *hdr;
3680 const __le32 *fw_data;
3681 unsigned i, fw_size;
3684 if (!adev->gfx.rlc_fw)
3687 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3688 amdgpu_ucode_print_rlc_hdr(&hdr->header);
3689 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3690 adev->gfx.rlc_feature_version = le32_to_cpu(
3691 hdr->ucode_feature_version);
3693 gfx_v7_0_rlc_stop(adev);
3696 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3697 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3699 gfx_v7_0_rlc_reset(adev);
3701 gfx_v7_0_init_pg(adev);
3703 WREG32(mmRLC_LB_CNTR_INIT, 0);
3704 WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3706 mutex_lock(&adev->grbm_idx_mutex);
3707 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3708 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3709 WREG32(mmRLC_LB_PARAMS, 0x00600408);
3710 WREG32(mmRLC_LB_CNTL, 0x80000004);
3711 mutex_unlock(&adev->grbm_idx_mutex);
3713 WREG32(mmRLC_MC_CNTL, 0);
3714 WREG32(mmRLC_UCODE_CNTL, 0);
3716 fw_data = (const __le32 *)
3717 (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3718 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3719 WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3720 for (i = 0; i < fw_size; i++)
3721 WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3722 WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3724 /* XXX - find out what chips support lbpw */
3725 gfx_v7_0_enable_lbpw(adev, false);
3727 if (adev->asic_type == CHIP_BONAIRE)
3728 WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3730 gfx_v7_0_rlc_start(adev);
3735 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3737 u32 data, orig, tmp, tmp2;
3739 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3741 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3742 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3744 tmp = gfx_v7_0_halt_rlc(adev);
3746 mutex_lock(&adev->grbm_idx_mutex);
3747 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3748 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3749 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3750 tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3751 RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3752 RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3753 WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3754 mutex_unlock(&adev->grbm_idx_mutex);
3756 gfx_v7_0_update_rlc(adev, tmp);
3758 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3760 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3762 RREG32(mmCB_CGTT_SCLK_CTRL);
3763 RREG32(mmCB_CGTT_SCLK_CTRL);
3764 RREG32(mmCB_CGTT_SCLK_CTRL);
3765 RREG32(mmCB_CGTT_SCLK_CTRL);
3767 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3771 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3775 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3777 u32 data, orig, tmp = 0;
3779 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3780 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3781 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3782 orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3783 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3785 WREG32(mmCP_MEM_SLP_CNTL, data);
3789 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3793 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3795 tmp = gfx_v7_0_halt_rlc(adev);
3797 mutex_lock(&adev->grbm_idx_mutex);
3798 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3799 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3800 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3801 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3802 RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3803 WREG32(mmRLC_SERDES_WR_CTRL, data);
3804 mutex_unlock(&adev->grbm_idx_mutex);
3806 gfx_v7_0_update_rlc(adev, tmp);
3808 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3809 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3810 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3811 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3812 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3813 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3814 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3815 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3816 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3817 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3818 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3819 data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3821 WREG32(mmCGTS_SM_CTRL_REG, data);
3824 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3827 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3829 data = RREG32(mmRLC_MEM_SLP_CNTL);
3830 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3831 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3832 WREG32(mmRLC_MEM_SLP_CNTL, data);
3835 data = RREG32(mmCP_MEM_SLP_CNTL);
3836 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3837 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3838 WREG32(mmCP_MEM_SLP_CNTL, data);
3841 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3842 data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3844 WREG32(mmCGTS_SM_CTRL_REG, data);
3846 tmp = gfx_v7_0_halt_rlc(adev);
3848 mutex_lock(&adev->grbm_idx_mutex);
3849 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3850 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3851 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3852 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3853 WREG32(mmRLC_SERDES_WR_CTRL, data);
3854 mutex_unlock(&adev->grbm_idx_mutex);
3856 gfx_v7_0_update_rlc(adev, tmp);
3860 static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3863 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3864 /* order matters! */
3866 gfx_v7_0_enable_mgcg(adev, true);
3867 gfx_v7_0_enable_cgcg(adev, true);
3869 gfx_v7_0_enable_cgcg(adev, false);
3870 gfx_v7_0_enable_mgcg(adev, false);
3872 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3875 static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3880 orig = data = RREG32(mmRLC_PG_CNTL);
3881 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3882 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3884 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3886 WREG32(mmRLC_PG_CNTL, data);
3889 static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3894 orig = data = RREG32(mmRLC_PG_CNTL);
3895 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3896 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3898 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3900 WREG32(mmRLC_PG_CNTL, data);
3903 static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3907 orig = data = RREG32(mmRLC_PG_CNTL);
3908 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3913 WREG32(mmRLC_PG_CNTL, data);
3916 static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3920 orig = data = RREG32(mmRLC_PG_CNTL);
3921 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3926 WREG32(mmRLC_PG_CNTL, data);
3929 static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
3931 const __le32 *fw_data;
3932 volatile u32 *dst_ptr;
3933 int me, i, max_me = 4;
3935 u32 table_offset, table_size;
3937 if (adev->asic_type == CHIP_KAVERI)
3940 if (adev->gfx.rlc.cp_table_ptr == NULL)
3943 /* write the cp table buffer */
3944 dst_ptr = adev->gfx.rlc.cp_table_ptr;
3945 for (me = 0; me < max_me; me++) {
3947 const struct gfx_firmware_header_v1_0 *hdr =
3948 (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
3949 fw_data = (const __le32 *)
3950 (adev->gfx.ce_fw->data +
3951 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3952 table_offset = le32_to_cpu(hdr->jt_offset);
3953 table_size = le32_to_cpu(hdr->jt_size);
3954 } else if (me == 1) {
3955 const struct gfx_firmware_header_v1_0 *hdr =
3956 (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
3957 fw_data = (const __le32 *)
3958 (adev->gfx.pfp_fw->data +
3959 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3960 table_offset = le32_to_cpu(hdr->jt_offset);
3961 table_size = le32_to_cpu(hdr->jt_size);
3962 } else if (me == 2) {
3963 const struct gfx_firmware_header_v1_0 *hdr =
3964 (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
3965 fw_data = (const __le32 *)
3966 (adev->gfx.me_fw->data +
3967 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3968 table_offset = le32_to_cpu(hdr->jt_offset);
3969 table_size = le32_to_cpu(hdr->jt_size);
3970 } else if (me == 3) {
3971 const struct gfx_firmware_header_v1_0 *hdr =
3972 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3973 fw_data = (const __le32 *)
3974 (adev->gfx.mec_fw->data +
3975 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3976 table_offset = le32_to_cpu(hdr->jt_offset);
3977 table_size = le32_to_cpu(hdr->jt_size);
3978 } else {
3979 const struct gfx_firmware_header_v1_0 *hdr =
3980 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3981 fw_data = (const __le32 *)
3982 (adev->gfx.mec2_fw->data +
3983 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3984 table_offset = le32_to_cpu(hdr->jt_offset);
3985 table_size = le32_to_cpu(hdr->jt_size);
3986 }
3988 for (i = 0; i < table_size; i ++) {
3989 dst_ptr[bo_offset + i] =
3990 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
3991 }
3993 bo_offset += table_size;
3994 }
3995 }
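/*
 * For reference, the loop above packs the firmware jump tables back to
 * back in the CP table buffer (the MEC2 table only on Kaveri, where
 * max_me == 5), each table being jt_size dwords copied from jt_offset
 * of its ucode image:
 *
 *   bo_offset 0: [CE JT][PFP JT][ME JT][MEC JT][MEC2 JT]
 */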
3997 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3998 bool enable)
3999 {
4000 u32 data, orig;
4002 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
4003 orig = data = RREG32(mmRLC_PG_CNTL);
4004 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4005 if (orig != data)
4006 WREG32(mmRLC_PG_CNTL, data);
4008 orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
4009 data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
4010 if (orig != data)
4011 WREG32(mmRLC_AUTO_PG_CTRL, data);
4012 } else {
4013 orig = data = RREG32(mmRLC_PG_CNTL);
4014 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4015 if (orig != data)
4016 WREG32(mmRLC_PG_CNTL, data);
4018 orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
4019 data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
4020 if (orig != data)
4021 WREG32(mmRLC_AUTO_PG_CTRL, data);
4023 data = RREG32(mmDB_RENDER_CONTROL);
4024 }
4025 }
4027 static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4028 u32 bitmap)
4029 {
4030 u32 data;
4032 if (!bitmap)
4033 return;
4035 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4036 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4038 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
4039 }
4041 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
4042 {
4043 u32 data, mask;
4045 data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
4046 data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
4048 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4049 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4051 mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
4053 return (~data) & mask;
4054 }
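/*
 * Worked example for the mask logic above, assuming max_cu_per_sh = 8:
 * the created bitmask is 0xff, so INACTIVE_CUS = 0x03 yields an active
 * bitmap of (~0x03) & 0xff = 0xfc, i.e. CUs 2..7 usable.
 */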
4056 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
4057 {
4058 u32 tmp;
4060 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4062 tmp = RREG32(mmRLC_MAX_PG_CU);
4063 tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
4064 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
4065 WREG32(mmRLC_MAX_PG_CU, tmp);
4066 }
4068 static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
4069 bool enable)
4070 {
4071 u32 data, orig;
4073 orig = data = RREG32(mmRLC_PG_CNTL);
4074 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
4075 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4076 else
4077 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4078 if (orig != data)
4079 WREG32(mmRLC_PG_CNTL, data);
4080 }
4082 static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
4083 bool enable)
4084 {
4085 u32 data, orig;
4087 orig = data = RREG32(mmRLC_PG_CNTL);
4088 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
4089 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4090 else
4091 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4092 if (orig != data)
4093 WREG32(mmRLC_PG_CNTL, data);
4094 }
4096 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
4097 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
4099 static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
4100 {
4101 u32 data, orig;
4102 u32 i;
4104 if (adev->gfx.rlc.cs_data) {
4105 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
4106 WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
4107 WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
4108 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
4109 } else {
4110 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
4111 for (i = 0; i < 3; i++)
4112 WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
4113 }
4114 if (adev->gfx.rlc.reg_list) {
4115 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
4116 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
4117 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
4118 }
4120 orig = data = RREG32(mmRLC_PG_CNTL);
4121 data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
4122 if (orig != data)
4123 WREG32(mmRLC_PG_CNTL, data);
4125 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
4126 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4128 data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
4129 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
4130 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4131 WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
4133 data = 0x10101010;
4134 WREG32(mmRLC_PG_DELAY, data);
4136 data = RREG32(mmRLC_PG_DELAY_2);
4137 data &= ~0xff;
4138 data |= 0x3;
4139 WREG32(mmRLC_PG_DELAY_2, data);
4141 data = RREG32(mmRLC_AUTO_PG_CTRL);
4142 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
4143 data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
4144 WREG32(mmRLC_AUTO_PG_CTRL, data);
4145 }
4148 static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
4149 {
4150 gfx_v7_0_enable_gfx_cgpg(adev, enable);
4151 gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
4152 gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
4153 }
4155 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
4156 {
4157 u32 count = 0;
4158 const struct cs_section_def *sect = NULL;
4159 const struct cs_extent_def *ext = NULL;
4161 if (adev->gfx.rlc.cs_data == NULL)
4162 return 0;
4164 /* begin clear state */
4165 count += 2;
4166 /* context control state */
4167 count += 3;
4169 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4170 for (ext = sect->section; ext->extent != NULL; ++ext) {
4171 if (sect->id == SECT_CONTEXT)
4172 count += 2 + ext->reg_count;
4173 else
4174 return 0;
4175 }
4176 }
4177 /* pa_sc_raster_config/pa_sc_raster_config1 */
4178 count += 4;
4179 /* end clear state */
4180 count += 2;
4181 /* clear state */
4182 count += 2;
4184 return count;
4185 }
4187 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
4188 volatile u32 *buffer)
4189 {
4190 u32 count = 0, i;
4191 const struct cs_section_def *sect = NULL;
4192 const struct cs_extent_def *ext = NULL;
4194 if (adev->gfx.rlc.cs_data == NULL)
4195 return;
4196 if (buffer == NULL)
4197 return;
4199 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4200 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
4202 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4203 buffer[count++] = cpu_to_le32(0x80000000);
4204 buffer[count++] = cpu_to_le32(0x80000000);
4206 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
4207 for (ext = sect->section; ext->extent != NULL; ++ext) {
4208 if (sect->id == SECT_CONTEXT) {
4209 buffer[count++] =
4210 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
4211 buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
4212 for (i = 0; i < ext->reg_count; i++)
4213 buffer[count++] = cpu_to_le32(ext->extent[i]);
4214 } else {
4215 return;
4216 }
4217 }
4218 }
4220 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
4221 buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
4222 switch (adev->asic_type) {
4223 case CHIP_BONAIRE:
4224 buffer[count++] = cpu_to_le32(0x16000012);
4225 buffer[count++] = cpu_to_le32(0x00000000);
4226 break;
4227 case CHIP_KAVERI:
4228 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4229 buffer[count++] = cpu_to_le32(0x00000000);
4230 break;
4231 case CHIP_KABINI:
4232 case CHIP_MULLINS:
4233 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
4234 buffer[count++] = cpu_to_le32(0x00000000);
4235 break;
4236 case CHIP_HAWAII:
4237 buffer[count++] = cpu_to_le32(0x3a00161a);
4238 buffer[count++] = cpu_to_le32(0x0000002e);
4239 break;
4240 default:
4241 buffer[count++] = cpu_to_le32(0x00000000);
4242 buffer[count++] = cpu_to_le32(0x00000000);
4243 break;
4244 }
4246 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4247 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
4249 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
4250 buffer[count++] = cpu_to_le32(0);
4251 }
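/*
 * A minimal pairing sketch (hypothetical helper, not part of this file):
 * gfx_v7_0_get_csb_size() sizes the clear-state buffer that
 * gfx_v7_0_get_csb_buffer() then fills, assuming rlc.cs_ptr holds the
 * CPU mapping of that buffer as set up during RLC init.
 */
static void gfx_v7_0_fill_csb_sketch(struct amdgpu_device *adev)
{
	/* dword count must match what get_csb_buffer() will write */
	u32 size_dw = gfx_v7_0_get_csb_size(adev);

	if (size_dw && adev->gfx.rlc.cs_ptr)
		gfx_v7_0_get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
}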
4253 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4254 {
4255 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4256 AMD_PG_SUPPORT_GFX_SMG |
4257 AMD_PG_SUPPORT_GFX_DMG |
4258 AMD_PG_SUPPORT_CP |
4259 AMD_PG_SUPPORT_GDS |
4260 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4261 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4262 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4263 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4264 gfx_v7_0_init_gfx_cgpg(adev);
4265 gfx_v7_0_enable_cp_pg(adev, true);
4266 gfx_v7_0_enable_gds_pg(adev, true);
4267 }
4268 gfx_v7_0_init_ao_cu_mask(adev);
4269 gfx_v7_0_update_gfx_pg(adev, true);
4270 }
4271 }
4273 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4274 {
4275 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4276 AMD_PG_SUPPORT_GFX_SMG |
4277 AMD_PG_SUPPORT_GFX_DMG |
4278 AMD_PG_SUPPORT_CP |
4279 AMD_PG_SUPPORT_GDS |
4280 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4281 gfx_v7_0_update_gfx_pg(adev, false);
4282 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4283 gfx_v7_0_enable_cp_pg(adev, false);
4284 gfx_v7_0_enable_gds_pg(adev, false);
4285 }
4286 }
4287 }
4289 /**
4290 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
4292 * @adev: amdgpu_device pointer
4294 * Fetches a GPU clock counter snapshot (CIK).
4295 * Returns the 64 bit clock counter snapshot.
4296 */
4297 static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4298 {
4299 uint64_t clock;
4301 mutex_lock(&adev->gfx.gpu_clock_mutex);
4302 WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4303 clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4304 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4305 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4306 return clock;
4307 }
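/*
 * Hypothetical usage sketch (not in this file): two snapshots around a
 * fixed delay give a rough estimate of the current GFX clock, since the
 * RLC counter ticks at the graphics clock.
 */
static uint64_t gfx_v7_0_estimate_gfx_clock_sketch(struct amdgpu_device *adev)
{
	uint64_t t0 = gfx_v7_0_get_gpu_clock_counter(adev);

	mdelay(1); /* ~1 ms sampling window */
	return (gfx_v7_0_get_gpu_clock_counter(adev) - t0) * 1000; /* ticks/sec */
}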
4309 static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4310 uint32_t vmid,
4311 uint32_t gds_base, uint32_t gds_size,
4312 uint32_t gws_base, uint32_t gws_size,
4313 uint32_t oa_base, uint32_t oa_size)
4314 {
4315 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
4316 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
4318 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
4319 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
4321 oa_base = oa_base >> AMDGPU_OA_SHIFT;
4322 oa_size = oa_size >> AMDGPU_OA_SHIFT;
4324 /* GDS Base */
4325 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4326 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4327 WRITE_DATA_DST_SEL(0)));
4328 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4329 amdgpu_ring_write(ring, 0);
4330 amdgpu_ring_write(ring, gds_base);
4332 /* GDS Size */
4333 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4334 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4335 WRITE_DATA_DST_SEL(0)));
4336 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4337 amdgpu_ring_write(ring, 0);
4338 amdgpu_ring_write(ring, gds_size);
4340 /* GWS */
4341 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4342 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4343 WRITE_DATA_DST_SEL(0)));
4344 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4345 amdgpu_ring_write(ring, 0);
4346 amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4348 /* OA */
4349 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4350 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4351 WRITE_DATA_DST_SEL(0)));
4352 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4353 amdgpu_ring_write(ring, 0);
4354 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4355 }
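/*
 * The OA write above encodes a contiguous ownership mask:
 * (1 << (oa_size + oa_base)) - (1 << oa_base) sets bits
 * [oa_base, oa_base + oa_size); e.g. oa_base = 4, oa_size = 4 gives
 * (1 << 8) - (1 << 4) = 0xf0.
 */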
4357 static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
4358 {
4359 return
4360 4; /* gfx_v7_0_ring_emit_ib_gfx */
4361 }
4363 static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
4364 {
4365 return
4366 20 + /* gfx_v7_0_ring_emit_gds_switch */
4367 7 + /* gfx_v7_0_ring_emit_hdp_flush */
4368 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
4369 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
4370 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
4371 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
4372 3; /* gfx_v7_ring_emit_cntxcntl */
4373 }
4375 static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
4376 {
4377 return
4378 4; /* gfx_v7_0_ring_emit_ib_compute */
4379 }
4381 static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
4382 {
4383 return
4384 20 + /* gfx_v7_0_ring_emit_gds_switch */
4385 7 + /* gfx_v7_0_ring_emit_hdp_flush */
4386 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
4387 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
4388 17 + /* gfx_v7_0_ring_emit_vm_flush */
4389 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
4390 }
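/*
 * For reference, the worst-case frame sizes above total
 * 20 + 7 + 5 + 12 * 3 + 7 + 4 + 17 + 6 + 3 = 105 dwords for gfx and
 * 20 + 7 + 5 + 7 + 17 + 7 * 3 = 77 dwords for compute.
 */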
4392 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4393 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4394 .select_se_sh = &gfx_v7_0_select_se_sh,
4395 };
4397 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4398 .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
4399 .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
4400 };
4402 static int gfx_v7_0_early_init(void *handle)
4403 {
4404 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4406 adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4407 adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
4408 adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4409 adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4410 gfx_v7_0_set_ring_funcs(adev);
4411 gfx_v7_0_set_irq_funcs(adev);
4412 gfx_v7_0_set_gds_init(adev);
4414 return 0;
4415 }
4417 static int gfx_v7_0_late_init(void *handle)
4418 {
4419 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4420 int r;
4422 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4423 if (r)
4424 return r;
4426 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4427 if (r)
4428 return r;
4430 return 0;
4431 }
4433 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4434 {
4435 u32 gb_addr_config;
4436 u32 mc_shared_chmap, mc_arb_ramcfg;
4437 u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4438 u32 tmp;
4440 switch (adev->asic_type) {
4441 case CHIP_BONAIRE:
4442 adev->gfx.config.max_shader_engines = 2;
4443 adev->gfx.config.max_tile_pipes = 4;
4444 adev->gfx.config.max_cu_per_sh = 7;
4445 adev->gfx.config.max_sh_per_se = 1;
4446 adev->gfx.config.max_backends_per_se = 2;
4447 adev->gfx.config.max_texture_channel_caches = 4;
4448 adev->gfx.config.max_gprs = 256;
4449 adev->gfx.config.max_gs_threads = 32;
4450 adev->gfx.config.max_hw_contexts = 8;
4452 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4453 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4454 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4455 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4456 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4457 break;
4458 case CHIP_HAWAII:
4459 adev->gfx.config.max_shader_engines = 4;
4460 adev->gfx.config.max_tile_pipes = 16;
4461 adev->gfx.config.max_cu_per_sh = 11;
4462 adev->gfx.config.max_sh_per_se = 1;
4463 adev->gfx.config.max_backends_per_se = 4;
4464 adev->gfx.config.max_texture_channel_caches = 16;
4465 adev->gfx.config.max_gprs = 256;
4466 adev->gfx.config.max_gs_threads = 32;
4467 adev->gfx.config.max_hw_contexts = 8;
4469 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4470 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4471 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4472 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4473 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4474 break;
4475 case CHIP_KAVERI:
4476 adev->gfx.config.max_shader_engines = 1;
4477 adev->gfx.config.max_tile_pipes = 4;
4478 if ((adev->pdev->device == 0x1304) ||
4479 (adev->pdev->device == 0x1305) ||
4480 (adev->pdev->device == 0x130C) ||
4481 (adev->pdev->device == 0x130F) ||
4482 (adev->pdev->device == 0x1310) ||
4483 (adev->pdev->device == 0x1311) ||
4484 (adev->pdev->device == 0x131C)) {
4485 adev->gfx.config.max_cu_per_sh = 8;
4486 adev->gfx.config.max_backends_per_se = 2;
4487 } else if ((adev->pdev->device == 0x1309) ||
4488 (adev->pdev->device == 0x130A) ||
4489 (adev->pdev->device == 0x130D) ||
4490 (adev->pdev->device == 0x1313) ||
4491 (adev->pdev->device == 0x131D)) {
4492 adev->gfx.config.max_cu_per_sh = 6;
4493 adev->gfx.config.max_backends_per_se = 2;
4494 } else if ((adev->pdev->device == 0x1306) ||
4495 (adev->pdev->device == 0x1307) ||
4496 (adev->pdev->device == 0x130B) ||
4497 (adev->pdev->device == 0x130E) ||
4498 (adev->pdev->device == 0x1315) ||
4499 (adev->pdev->device == 0x131B)) {
4500 adev->gfx.config.max_cu_per_sh = 4;
4501 adev->gfx.config.max_backends_per_se = 1;
4502 } else {
4503 adev->gfx.config.max_cu_per_sh = 3;
4504 adev->gfx.config.max_backends_per_se = 1;
4505 }
4506 adev->gfx.config.max_sh_per_se = 1;
4507 adev->gfx.config.max_texture_channel_caches = 4;
4508 adev->gfx.config.max_gprs = 256;
4509 adev->gfx.config.max_gs_threads = 16;
4510 adev->gfx.config.max_hw_contexts = 8;
4512 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4513 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4514 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4515 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4516 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4517 break;
4518 case CHIP_KABINI:
4519 case CHIP_MULLINS:
4520 default:
4521 adev->gfx.config.max_shader_engines = 1;
4522 adev->gfx.config.max_tile_pipes = 2;
4523 adev->gfx.config.max_cu_per_sh = 2;
4524 adev->gfx.config.max_sh_per_se = 1;
4525 adev->gfx.config.max_backends_per_se = 1;
4526 adev->gfx.config.max_texture_channel_caches = 2;
4527 adev->gfx.config.max_gprs = 256;
4528 adev->gfx.config.max_gs_threads = 16;
4529 adev->gfx.config.max_hw_contexts = 8;
4531 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4532 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4533 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4534 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4535 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4536 break;
4537 }
4539 mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
4540 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4541 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4543 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4544 adev->gfx.config.mem_max_burst_length_bytes = 256;
4545 if (adev->flags & AMD_IS_APU) {
4546 /* Get memory bank mapping mode. */
4547 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4548 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4549 dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4551 tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4552 dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4553 dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4555 /* Validate settings in case only one DIMM installed. */
4556 if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4557 dimm00_addr_map = 0;
4558 if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4559 dimm01_addr_map = 0;
4560 if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4561 dimm10_addr_map = 0;
4562 if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4563 dimm11_addr_map = 0;
4565 /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4566 /* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
4567 if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4568 adev->gfx.config.mem_row_size_in_kb = 2;
4570 adev->gfx.config.mem_row_size_in_kb = 1;
4571 } else {
4572 tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4573 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4574 if (adev->gfx.config.mem_row_size_in_kb > 4)
4575 adev->gfx.config.mem_row_size_in_kb = 4;
4576 }
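/*
 * Worked example for the non-APU path above: tmp = 0 gives
 * 4 * (1 << 8) / 1024 = 1 KB rows, tmp = 2 gives 4 KB, and anything
 * larger is capped at 4 KB.
 */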
4577 /* XXX use MC settings? */
4578 adev->gfx.config.shader_engine_tile_size = 32;
4579 adev->gfx.config.num_gpus = 1;
4580 adev->gfx.config.multi_gpu_tile_size = 64;
4582 /* fix up row size */
4583 gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4584 switch (adev->gfx.config.mem_row_size_in_kb) {
4585 case 1:
4586 default:
4587 gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4588 break;
4589 case 2:
4590 gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4591 break;
4592 case 4:
4593 gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4594 break;
4595 }
4596 adev->gfx.config.gb_addr_config = gb_addr_config;
4597 }
4599 static int gfx_v7_0_sw_init(void *handle)
4600 {
4601 struct amdgpu_ring *ring;
4602 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4603 int i, r;
4605 /* EOP Event */
4606 r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
4607 if (r)
4608 return r;
4610 /* Privileged reg */
4611 r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
4612 if (r)
4613 return r;
4615 /* Privileged inst */
4616 r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
4617 if (r)
4618 return r;
4620 gfx_v7_0_scratch_init(adev);
4622 r = gfx_v7_0_init_microcode(adev);
4623 if (r) {
4624 DRM_ERROR("Failed to load gfx firmware!\n");
4625 return r;
4626 }
4628 r = gfx_v7_0_rlc_init(adev);
4629 if (r) {
4630 DRM_ERROR("Failed to init rlc BOs!\n");
4631 return r;
4632 }
4634 /* allocate mec buffers */
4635 r = gfx_v7_0_mec_init(adev);
4636 if (r) {
4637 DRM_ERROR("Failed to init MEC BOs!\n");
4638 return r;
4639 }
4641 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4642 ring = &adev->gfx.gfx_ring[i];
4643 ring->ring_obj = NULL;
4644 sprintf(ring->name, "gfx");
4645 r = amdgpu_ring_init(adev, ring, 1024,
4646 PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
4647 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
4648 AMDGPU_RING_TYPE_GFX);
4649 if (r)
4650 return r;
4651 }
4653 /* set up the compute queues */
4654 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4655 unsigned irq_type;
4657 /* max 32 queues per MEC */
4658 if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
4659 DRM_ERROR("Too many (%d) compute rings!\n", i);
4660 break;
4661 }
4662 ring = &adev->gfx.compute_ring[i];
4663 ring->ring_obj = NULL;
4664 ring->use_doorbell = true;
4665 ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i;
4666 ring->me = 1; /* first MEC */
4667 ring->pipe = i / 8;
4668 ring->queue = i % 8;
4669 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4670 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
4671 /* type-2 packets are deprecated on MEC, use type-3 instead */
4672 r = amdgpu_ring_init(adev, ring, 1024,
4673 PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
4674 &adev->gfx.eop_irq, irq_type,
4675 AMDGPU_RING_TYPE_COMPUTE);
4676 if (r)
4677 return r;
4678 }
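/*
 * E.g. with the derivation above, compute ring i = 10 becomes MEC1
 * (me = 1), pipe = 10 / 8 = 1, queue = 10 % 8 = 2, with doorbell
 * AMDGPU_DOORBELL_MEC_RING0 + 10 and the MEC1 pipe 1 EOP interrupt.
 */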
4680 /* reserve GDS, GWS and OA resource for gfx */
4681 r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
4682 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
4683 &adev->gds.gds_gfx_bo, NULL, NULL);
4684 if (r)
4685 return r;
4687 r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
4688 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
4689 &adev->gds.gws_gfx_bo, NULL, NULL);
4690 if (r)
4691 return r;
4693 r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
4694 PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
4695 &adev->gds.oa_gfx_bo, NULL, NULL);
4696 if (r)
4697 return r;
4699 adev->gfx.ce_ram_size = 0x8000;
4701 gfx_v7_0_gpu_early_init(adev);
4703 return 0;
4704 }
4706 static int gfx_v7_0_sw_fini(void *handle)
4707 {
4708 int i;
4709 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4711 amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
4712 amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
4713 amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
4715 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4716 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4717 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4718 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4720 gfx_v7_0_cp_compute_fini(adev);
4721 gfx_v7_0_rlc_fini(adev);
4722 gfx_v7_0_mec_fini(adev);
4723 gfx_v7_0_free_microcode(adev);
4725 return 0;
4726 }
4728 static int gfx_v7_0_hw_init(void *handle)
4729 {
4730 int r;
4731 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4733 gfx_v7_0_gpu_init(adev);
4736 r = gfx_v7_0_rlc_resume(adev);
4737 if (r)
4738 return r;
4740 r = gfx_v7_0_cp_resume(adev);
4741 if (r)
4742 return r;
4744 return r;
4745 }
4747 static int gfx_v7_0_hw_fini(void *handle)
4748 {
4749 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4751 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4752 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4753 gfx_v7_0_cp_enable(adev, false);
4754 gfx_v7_0_rlc_stop(adev);
4755 gfx_v7_0_fini_pg(adev);
4757 return 0;
4758 }
4760 static int gfx_v7_0_suspend(void *handle)
4761 {
4762 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4764 return gfx_v7_0_hw_fini(adev);
4765 }
4767 static int gfx_v7_0_resume(void *handle)
4768 {
4769 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4771 return gfx_v7_0_hw_init(adev);
4772 }
4774 static bool gfx_v7_0_is_idle(void *handle)
4775 {
4776 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4778 if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4779 return false;
4780 else
4781 return true;
4782 }
4784 static int gfx_v7_0_wait_for_idle(void *handle)
4785 {
4786 unsigned i;
4787 u32 tmp;
4788 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4790 for (i = 0; i < adev->usec_timeout; i++) {
4791 /* read GRBM_STATUS */
4792 tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4794 if (!tmp)
4795 return 0;
4796 udelay(1);
4797 }
4798 return -ETIMEDOUT;
4799 }
4801 static int gfx_v7_0_soft_reset(void *handle)
4802 {
4803 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4804 u32 tmp;
4805 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4807 /* GRBM_STATUS */
4808 tmp = RREG32(mmGRBM_STATUS);
4809 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4810 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4811 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4812 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4813 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4814 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4815 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4816 GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4818 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4819 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4820 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4821 }
4823 /* GRBM_STATUS2 */
4824 tmp = RREG32(mmGRBM_STATUS2);
4825 if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4826 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4828 /* SRBM_STATUS */
4829 tmp = RREG32(mmSRBM_STATUS);
4830 if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4831 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4833 if (grbm_soft_reset || srbm_soft_reset) {
4835 gfx_v7_0_fini_pg(adev);
4836 gfx_v7_0_update_cg(adev, false);
4838 /* stop the rlc */
4839 gfx_v7_0_rlc_stop(adev);
4841 /* Disable GFX parsing/prefetching */
4842 WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4844 /* Disable MEC parsing/prefetching */
4845 WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4847 if (grbm_soft_reset) {
4848 tmp = RREG32(mmGRBM_SOFT_RESET);
4849 tmp |= grbm_soft_reset;
4850 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4851 WREG32(mmGRBM_SOFT_RESET, tmp);
4852 tmp = RREG32(mmGRBM_SOFT_RESET);
4854 udelay(50);
4856 tmp &= ~grbm_soft_reset;
4857 WREG32(mmGRBM_SOFT_RESET, tmp);
4858 tmp = RREG32(mmGRBM_SOFT_RESET);
4859 }
4861 if (srbm_soft_reset) {
4862 tmp = RREG32(mmSRBM_SOFT_RESET);
4863 tmp |= srbm_soft_reset;
4864 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4865 WREG32(mmSRBM_SOFT_RESET, tmp);
4866 tmp = RREG32(mmSRBM_SOFT_RESET);
4868 udelay(50);
4870 tmp &= ~srbm_soft_reset;
4871 WREG32(mmSRBM_SOFT_RESET, tmp);
4872 tmp = RREG32(mmSRBM_SOFT_RESET);
4873 }
4874 /* Wait a little for things to settle down */
4875 udelay(50);
4876 }
4878 return 0;
4879 }
4880 static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4881 enum amdgpu_interrupt_state state)
4882 {
4883 u32 cp_int_cntl;
4885 switch (state) {
4886 case AMDGPU_IRQ_STATE_DISABLE:
4887 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4888 cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4889 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4890 break;
4891 case AMDGPU_IRQ_STATE_ENABLE:
4892 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4893 cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4894 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4895 break;
4896 default:
4897 break;
4898 }
4899 }
4901 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4903 enum amdgpu_interrupt_state state)
4904 {
4905 u32 mec_int_cntl, mec_int_cntl_reg;
4907 /*
4908 * amdgpu controls only pipe 0 of MEC1. That's why this function only
4909 * handles the setting of interrupts for this specific pipe. All other
4910 * pipes' interrupts are set by amdkfd.
4911 */
4913 if (me == 1) {
4914 switch (pipe) {
4915 case 0:
4916 mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4917 break;
4918 default:
4919 DRM_DEBUG("invalid pipe %d\n", pipe);
4920 return;
4921 }
4922 } else {
4923 DRM_DEBUG("invalid me %d\n", me);
4924 return;
4925 }
4927 switch (state) {
4928 case AMDGPU_IRQ_STATE_DISABLE:
4929 mec_int_cntl = RREG32(mec_int_cntl_reg);
4930 mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4931 WREG32(mec_int_cntl_reg, mec_int_cntl);
4932 break;
4933 case AMDGPU_IRQ_STATE_ENABLE:
4934 mec_int_cntl = RREG32(mec_int_cntl_reg);
4935 mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4936 WREG32(mec_int_cntl_reg, mec_int_cntl);
4937 break;
4938 default:
4939 break;
4940 }
4941 }
4943 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4944 struct amdgpu_irq_src *src,
4946 enum amdgpu_interrupt_state state)
4947 {
4948 u32 cp_int_cntl;
4950 switch (state) {
4951 case AMDGPU_IRQ_STATE_DISABLE:
4952 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4953 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4954 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4955 break;
4956 case AMDGPU_IRQ_STATE_ENABLE:
4957 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4958 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4959 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4960 break;
4961 default:
4962 break;
4963 }
4965 return 0;
4966 }
4968 static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4969 struct amdgpu_irq_src *src,
4971 enum amdgpu_interrupt_state state)
4972 {
4973 u32 cp_int_cntl;
4975 switch (state) {
4976 case AMDGPU_IRQ_STATE_DISABLE:
4977 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4978 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4979 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4980 break;
4981 case AMDGPU_IRQ_STATE_ENABLE:
4982 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4983 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4984 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4985 break;
4986 default:
4987 break;
4988 }
4990 return 0;
4991 }
4993 static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4994 struct amdgpu_irq_src *src,
4996 enum amdgpu_interrupt_state state)
4997 {
4998 switch (type) {
4999 case AMDGPU_CP_IRQ_GFX_EOP:
5000 gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
5001 break;
5002 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5003 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5004 break;
5005 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5006 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5007 break;
5008 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5009 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5010 break;
5011 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5012 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5013 break;
5014 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5015 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5016 break;
5017 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5018 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5019 break;
5020 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5021 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5022 break;
5023 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5024 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5025 break;
5026 default:
5027 break;
5028 }
5030 return 0;
5031 }
5032 static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
5033 struct amdgpu_irq_src *source,
5034 struct amdgpu_iv_entry *entry)
5035 {
5036 u8 me_id, pipe_id;
5037 struct amdgpu_ring *ring;
5038 int i;
5040 DRM_DEBUG("IH: CP EOP\n");
5041 me_id = (entry->ring_id & 0x0c) >> 2;
5042 pipe_id = (entry->ring_id & 0x03) >> 0;
5043 switch (me_id) {
5044 case 0:
5045 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5046 break;
5047 case 1:
5048 case 2:
5049 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5050 ring = &adev->gfx.compute_ring[i];
5051 if ((ring->me == me_id) && (ring->pipe == pipe_id))
5052 amdgpu_fence_process(ring);
5053 }
5054 break;
5055 }
5056 return 0;
5057 }
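/*
 * Decode example for the ring_id fields above: ring_id = 0x05 yields
 * me_id = (0x05 & 0x0c) >> 2 = 1 and pipe_id = 0x05 & 0x03 = 1, so all
 * MEC1 pipe 1 compute rings have their fences processed.
 */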
5059 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
5060 struct amdgpu_irq_src *source,
5061 struct amdgpu_iv_entry *entry)
5062 {
5063 DRM_ERROR("Illegal register access in command stream\n");
5064 schedule_work(&adev->reset_work);
5065 return 0;
5066 }
5068 static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
5069 struct amdgpu_irq_src *source,
5070 struct amdgpu_iv_entry *entry)
5071 {
5072 DRM_ERROR("Illegal instruction in command stream\n");
5073 /* XXX soft reset the gfx block only */
5074 schedule_work(&adev->reset_work);
5075 return 0;
5076 }
5078 static int gfx_v7_0_set_clockgating_state(void *handle,
5079 enum amd_clockgating_state state)
5080 {
5081 bool gate = false;
5082 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5084 if (state == AMD_CG_STATE_GATE)
5085 gate = true;
5087 gfx_v7_0_enable_gui_idle_interrupt(adev, false);
5088 /* order matters! */
5089 if (gate) {
5090 gfx_v7_0_enable_mgcg(adev, true);
5091 gfx_v7_0_enable_cgcg(adev, true);
5092 } else {
5093 gfx_v7_0_enable_cgcg(adev, false);
5094 gfx_v7_0_enable_mgcg(adev, false);
5095 }
5096 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
5098 return 0;
5099 }
5101 static int gfx_v7_0_set_powergating_state(void *handle,
5102 enum amd_powergating_state state)
5103 {
5104 bool gate = false;
5105 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5107 if (state == AMD_PG_STATE_GATE)
5108 gate = true;
5110 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
5111 AMD_PG_SUPPORT_GFX_SMG |
5112 AMD_PG_SUPPORT_GFX_DMG |
5113 AMD_PG_SUPPORT_CP |
5114 AMD_PG_SUPPORT_GDS |
5115 AMD_PG_SUPPORT_RLC_SMU_HS)) {
5116 gfx_v7_0_update_gfx_pg(adev, gate);
5117 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
5118 gfx_v7_0_enable_cp_pg(adev, gate);
5119 gfx_v7_0_enable_gds_pg(adev, gate);
5120 }
5121 }
5123 return 0;
5124 }
5126 const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
5127 .name = "gfx_v7_0",
5128 .early_init = gfx_v7_0_early_init,
5129 .late_init = gfx_v7_0_late_init,
5130 .sw_init = gfx_v7_0_sw_init,
5131 .sw_fini = gfx_v7_0_sw_fini,
5132 .hw_init = gfx_v7_0_hw_init,
5133 .hw_fini = gfx_v7_0_hw_fini,
5134 .suspend = gfx_v7_0_suspend,
5135 .resume = gfx_v7_0_resume,
5136 .is_idle = gfx_v7_0_is_idle,
5137 .wait_for_idle = gfx_v7_0_wait_for_idle,
5138 .soft_reset = gfx_v7_0_soft_reset,
5139 .set_clockgating_state = gfx_v7_0_set_clockgating_state,
5140 .set_powergating_state = gfx_v7_0_set_powergating_state,
5141 };
5143 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5144 .get_rptr = gfx_v7_0_ring_get_rptr,
5145 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5146 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5147 .parse_cs = NULL,
5148 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5149 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5150 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5151 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5152 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5153 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5154 .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
5155 .test_ring = gfx_v7_0_ring_test_ring,
5156 .test_ib = gfx_v7_0_ring_test_ib,
5157 .insert_nop = amdgpu_ring_insert_nop,
5158 .pad_ib = amdgpu_ring_generic_pad_ib,
5159 .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
5160 .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
5161 .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
5162 };
5164 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5165 .get_rptr = gfx_v7_0_ring_get_rptr,
5166 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
5167 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
5168 .parse_cs = NULL,
5169 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
5170 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
5171 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5172 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5173 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5174 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5175 .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
5176 .test_ring = gfx_v7_0_ring_test_ring,
5177 .test_ib = gfx_v7_0_ring_test_ib,
5178 .insert_nop = amdgpu_ring_insert_nop,
5179 .pad_ib = amdgpu_ring_generic_pad_ib,
5180 .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
5181 .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
5182 };
5184 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5185 {
5186 int i;
5188 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5189 adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5190 for (i = 0; i < adev->gfx.num_compute_rings; i++)
5191 adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5192 }
5194 static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
5195 .set = gfx_v7_0_set_eop_interrupt_state,
5196 .process = gfx_v7_0_eop_irq,
5197 };
5199 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
5200 .set = gfx_v7_0_set_priv_reg_fault_state,
5201 .process = gfx_v7_0_priv_reg_irq,
5202 };
5204 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
5205 .set = gfx_v7_0_set_priv_inst_fault_state,
5206 .process = gfx_v7_0_priv_inst_irq,
5207 };
5209 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5210 {
5211 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5212 adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5214 adev->gfx.priv_reg_irq.num_types = 1;
5215 adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5217 adev->gfx.priv_inst_irq.num_types = 1;
5218 adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5219 }
5221 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5222 {
5223 /* init asic gds info */
5224 adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
5225 adev->gds.gws.total_size = 64;
5226 adev->gds.oa.total_size = 16;
5228 if (adev->gds.mem.total_size == 64 * 1024) {
5229 adev->gds.mem.gfx_partition_size = 4096;
5230 adev->gds.mem.cs_partition_size = 4096;
5232 adev->gds.gws.gfx_partition_size = 4;
5233 adev->gds.gws.cs_partition_size = 4;
5235 adev->gds.oa.gfx_partition_size = 4;
5236 adev->gds.oa.cs_partition_size = 1;
5237 } else {
5238 adev->gds.mem.gfx_partition_size = 1024;
5239 adev->gds.mem.cs_partition_size = 1024;
5241 adev->gds.gws.gfx_partition_size = 16;
5242 adev->gds.gws.cs_partition_size = 16;
5244 adev->gds.oa.gfx_partition_size = 4;
5245 adev->gds.oa.cs_partition_size = 4;
5246 }
5247 }
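/*
 * Sanity check on the 64 KB case above: 16 VMIDs x 4096 bytes covers
 * the full 64 KB of GDS memory, 16 x 4 covers the 64 GWS resources, and
 * the per-VMID OA partitions stay within the 16 OA resources.
 */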
5250 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5251 {
5252 int i, j, k, counter, active_cu_number = 0;
5253 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5254 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5255 unsigned disable_masks[4 * 2];
5257 memset(cu_info, 0, sizeof(*cu_info));
5259 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5261 mutex_lock(&adev->grbm_idx_mutex);
5262 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5263 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5264 mask = 1;
5265 ao_bitmap = 0;
5266 counter = 0;
5267 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5268 if (i < 4 && j < 2)
5269 gfx_v7_0_set_user_cu_inactive_bitmap(
5270 adev, disable_masks[i * 2 + j]);
5271 bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5272 cu_info->bitmap[i][j] = bitmap;
5274 for (k = 0; k < 16; k ++) {
5275 if (bitmap & mask) {
5276 if (counter < 2)
5277 ao_bitmap |= mask;
5278 counter ++;
5279 }
5280 mask <<= 1;
5281 }
5282 active_cu_number += counter;
5283 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5284 }
5285 }
5286 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5287 mutex_unlock(&adev->grbm_idx_mutex);
5289 cu_info->number = active_cu_number;
5290 cu_info->ao_cu_mask = ao_cu_mask;
5291 }
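/*
 * The ao_cu_mask packing above places SE i / SH j at bit (i * 16 + j * 8);
 * e.g. the always-on CUs of SE1/SH0 land in bits 16..23.
 */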