/* drivers/gpu/drm/amd/amdgpu/iceland_smc.c */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "ppsmc.h"
#include "iceland_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#define ICELAND_SMC_SIZE 0x20000

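/*
 * Select the SMC SRAM address accessed through the SMC_IND_INDEX_0/
 * SMC_IND_DATA_0 register pair.  The address must be dword aligned and
 * within the given limit; auto-increment is disabled.  Callers hold
 * adev->smc_idx_lock.
 */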
static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
                                        uint32_t smc_address, uint32_t limit)
{
        uint32_t val;

        if (smc_address & 3)
                return -EINVAL;

        if ((smc_address + 3) > limit)
                return -EINVAL;

        WREG32(mmSMC_IND_INDEX_0, smc_address);

        val = RREG32(mmSMC_IND_ACCESS_CNTL);
        val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
        WREG32(mmSMC_IND_ACCESS_CNTL, val);

        return 0;
}

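/*
 * Copy byte_count bytes from src into SMC SRAM starting at
 * smc_start_address.  Whole dwords are written MSB first; any trailing
 * bytes are merged with the existing SRAM contents using a
 * read-modify-write cycle.  The start address must be dword aligned and
 * the range must not exceed limit.
 */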
static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
                                     uint32_t smc_start_address,
                                     const uint8_t *src,
                                     uint32_t byte_count, uint32_t limit)
{
        uint32_t addr;
        uint32_t data, orig_data;
        int result = 0;
        uint32_t extra_shift;
        unsigned long flags;

        if (smc_start_address & 3)
                return -EINVAL;

        if ((smc_start_address + byte_count) > limit)
                return -EINVAL;

        addr = smc_start_address;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        while (byte_count >= 4) {
                /* Bytes are written into the SMC address space with the MSB first */
                data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

                result = iceland_set_smc_sram_address(adev, addr, limit);

                if (result)
                        goto out;

                WREG32(mmSMC_IND_DATA_0, data);

                src += 4;
                byte_count -= 4;
                addr += 4;
        }

        if (0 != byte_count) {
                /* Write the remaining odd bytes using a read-modify-write cycle */
                data = 0;

                result = iceland_set_smc_sram_address(adev, addr, limit);
                if (result)
                        goto out;

                orig_data = RREG32(mmSMC_IND_DATA_0);
                extra_shift = 8 * (4 - byte_count);

                while (byte_count > 0) {
                        data = (data << 8) + *src++;
                        byte_count--;
                }

                data <<= extra_shift;
                data |= (orig_data & ~((~0UL) << extra_shift));

                result = iceland_set_smc_sram_address(adev, addr, limit);
                if (result)
                        goto out;

                WREG32(mmSMC_IND_DATA_0, data);
        }

out:
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return result;
}

void iceland_start_smc(struct amdgpu_device *adev)
{
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

        val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
        WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}

void iceland_reset_smc(struct amdgpu_device *adev)
{
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);

        val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
        WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
}

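/*
 * Patch the first dword of SMC SRAM so that the SMC jumps to the loaded
 * firmware entry point when it starts executing (see
 * iceland_smu_start_smc()).
 */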
static int iceland_program_jump_on_start(struct amdgpu_device *adev)
{
        static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};

        iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);

        return 0;
}

void iceland_stop_smc_clock(struct amdgpu_device *adev)
{
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

        val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
        WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}

void iceland_start_smc_clock(struct amdgpu_device *adev)
{
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

        val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
        WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
}

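/*
 * The SMC is treated as running when its clock is not disabled and its
 * program counter (ixSMC_PC_C) has reached at least 0x20100.
 */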
static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
{
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);

        val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);

        return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
}

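/*
 * Poll SMC_RESP_0 until the SMC acknowledges the last message or the
 * usec timeout expires.
 */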
static int wait_smu_response(struct amdgpu_device *adev)
{
        int i;
        uint32_t val;

        for (i = 0; i < adev->usec_timeout; i++) {
                val = RREG32(mmSMC_RESP_0);
                if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
                        break;
                udelay(1);
        }

        if (i == adev->usec_timeout)
                return -EINVAL;

        return 0;
}

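/*
 * Post a message to the SMC through SMC_MESSAGE_0 and wait for it to be
 * processed.  The _without_waiting variant below only waits for any
 * previous message to complete before posting the new one, and the
 * _with_parameter variants write the argument to SMC_MSG_ARG_0 first.
 */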
static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
        if (!iceland_is_smc_ram_running(adev))
                return -EINVAL;

        if (wait_smu_response(adev)) {
                DRM_ERROR("Failed to send previous message\n");
                return -EINVAL;
        }

        WREG32(mmSMC_MESSAGE_0, msg);

        if (wait_smu_response(adev)) {
                DRM_ERROR("Failed to send message\n");
                return -EINVAL;
        }

        return 0;
}

static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
                                                   PPSMC_Msg msg)
{
        if (!iceland_is_smc_ram_running(adev))
                return -EINVAL;

        if (wait_smu_response(adev)) {
                DRM_ERROR("Failed to send previous message\n");
                return -EINVAL;
        }

        WREG32(mmSMC_MESSAGE_0, msg);

        return 0;
}

static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                  PPSMC_Msg msg,
                                                  uint32_t parameter)
{
        WREG32(mmSMC_MSG_ARG_0, parameter);

        return iceland_send_msg_to_smc(adev, msg);
}

static int iceland_send_msg_to_smc_with_parameter_without_waiting(
                                        struct amdgpu_device *adev,
                                        PPSMC_Msg msg, uint32_t parameter)
{
        WREG32(mmSMC_MSG_ARG_0, parameter);

        return iceland_send_msg_to_smc_without_waiting(adev, msg);
}

#if 0 /* not used yet */
static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
{
        int i;
        uint32_t val;

        if (!iceland_is_smc_ram_running(adev))
                return -EINVAL;

        for (i = 0; i < adev->usec_timeout; i++) {
                val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
                if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
                        break;
                udelay(1);
        }

        if (i == adev->usec_timeout)
                return -EINVAL;

        return 0;
}
#endif

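/*
 * Validate the SMC firmware image and copy it into SMC SRAM.  The SMC
 * clock is stopped and the SMC is held in reset while the image is
 * streamed in through the auto-incrementing indirect data register;
 * iceland_smu_start_smc() later releases it.
 */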
static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
{
        const struct smc_firmware_header_v1_0 *hdr;
        uint32_t ucode_size;
        uint32_t ucode_start_address;
        const uint8_t *src;
        uint32_t val;
        uint32_t byte_count;
        uint32_t data;
        unsigned long flags;
        int i;

        if (!adev->pm.fw)
                return -EINVAL;

        /* Skip SMC ucode loading on SR-IOV capable boards.
         * vbios does this for us in asic_init in that case.
         */
        if (adev->virtualization.supports_sr_iov)
                return 0;

        hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
        amdgpu_ucode_print_smc_hdr(&hdr->header);

        adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
        ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
        ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
        src = (const uint8_t *)
                (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

        if (ucode_size & 3) {
                DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
                return -EINVAL;
        }

        if (ucode_size > ICELAND_SMC_SIZE) {
                DRM_ERROR("SMC address is beyond the SMC RAM area\n");
                return -EINVAL;
        }

        for (i = 0; i < adev->usec_timeout; i++) {
                val = RREG32_SMC(ixRCU_UC_EVENTS);
                if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
                        break;
                udelay(1);
        }

        val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
        WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);

        iceland_stop_smc_clock(adev);
        iceland_reset_smc(adev);

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, ucode_start_address);

        val = RREG32(mmSMC_IND_ACCESS_CNTL);
        val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
        WREG32(mmSMC_IND_ACCESS_CNTL, val);

        byte_count = ucode_size;
        while (byte_count >= 4) {
                data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
                WREG32(mmSMC_IND_DATA_0, data);
                src += 4;
                byte_count -= 4;
        }

        val = RREG32(mmSMC_IND_ACCESS_CNTL);
        val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
        WREG32(mmSMC_IND_ACCESS_CNTL, val);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

        return 0;
}

#if 0 /* not used yet */
static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
                                       uint32_t smc_address,
                                       uint32_t *value,
                                       uint32_t limit)
{
        int result;
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        result = iceland_set_smc_sram_address(adev, smc_address, limit);
        if (result == 0)
                *value = RREG32(mmSMC_IND_DATA_0);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return result;
}

static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
                                        uint32_t smc_address,
                                        uint32_t value,
                                        uint32_t limit)
{
        int result;
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        result = iceland_set_smc_sram_address(adev, smc_address, limit);
        if (result == 0)
                WREG32(mmSMC_IND_DATA_0, value);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return result;
}

static int iceland_smu_stop_smc(struct amdgpu_device *adev)
{
        iceland_reset_smc(adev);
        iceland_stop_smc_clock(adev);

        return 0;
}
#endif

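/*
 * Bring the SMC out of reset: program the start jump, ungate the SMC
 * clock, release reset and then wait for the firmware to report that
 * its interrupts are enabled.
 */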
static int iceland_smu_start_smc(struct amdgpu_device *adev)
{
        int i;
        uint32_t val;

        iceland_program_jump_on_start(adev);
        iceland_start_smc_clock(adev);
        iceland_start_smc(adev);

        for (i = 0; i < adev->usec_timeout; i++) {
                val = RREG32_SMC(ixFIRMWARE_FLAGS);
                if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
                        break;
                udelay(1);
        }

        return 0;
}

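/* Map an SMU ucode id (UCODE_ID_*) onto the corresponding amdgpu ucode id. */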
static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
{
        switch (fw_type) {
        case UCODE_ID_SDMA0:
                return AMDGPU_UCODE_ID_SDMA0;
        case UCODE_ID_SDMA1:
                return AMDGPU_UCODE_ID_SDMA1;
        case UCODE_ID_CP_CE:
                return AMDGPU_UCODE_ID_CP_CE;
        case UCODE_ID_CP_PFP:
                return AMDGPU_UCODE_ID_CP_PFP;
        case UCODE_ID_CP_ME:
                return AMDGPU_UCODE_ID_CP_ME;
        case UCODE_ID_CP_MEC:
        case UCODE_ID_CP_MEC_JT1:
                return AMDGPU_UCODE_ID_CP_MEC1;
        case UCODE_ID_CP_MEC_JT2:
                return AMDGPU_UCODE_ID_CP_MEC2;
        case UCODE_ID_RLC_G:
                return AMDGPU_UCODE_ID_RLC_G;
        default:
                DRM_ERROR("ucode type is out of range!\n");
                return AMDGPU_UCODE_ID_MAXIMUM;
        }
}

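/*
 * Return the SMU load-status mask bits for an amdgpu ucode id, as used
 * when checking SOFT_REGISTERS_TABLE_27 for load completion.
 */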
static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
{
        switch (fw_type) {
        case AMDGPU_UCODE_ID_SDMA0:
                return UCODE_ID_SDMA0_MASK;
        case AMDGPU_UCODE_ID_SDMA1:
                return UCODE_ID_SDMA1_MASK;
        case AMDGPU_UCODE_ID_CP_CE:
                return UCODE_ID_CP_CE_MASK;
        case AMDGPU_UCODE_ID_CP_PFP:
                return UCODE_ID_CP_PFP_MASK;
        case AMDGPU_UCODE_ID_CP_ME:
                return UCODE_ID_CP_ME_MASK;
        case AMDGPU_UCODE_ID_CP_MEC1:
                return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
        case AMDGPU_UCODE_ID_CP_MEC2:
                return UCODE_ID_CP_MEC_MASK;
        case AMDGPU_UCODE_ID_RLC_G:
                return UCODE_ID_RLC_G_MASK;
        default:
                DRM_ERROR("ucode type is out of range!\n");
                return 0;
        }
}

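/*
 * Fill one SMU_Entry in the driver's TOC with the GPU address and size
 * of the firmware image that the SMU should fetch for fw_type.
 */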
static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
                                                      uint32_t fw_type,
                                                      struct SMU_Entry *entry)
{
        enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
        struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
        const struct gfx_firmware_header_v1_0 *header = NULL;
        uint64_t gpu_addr;
        uint32_t data_size;

        if (ucode->fw == NULL)
                return -EINVAL;

        gpu_addr  = ucode->mc_addr;
        header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
        data_size = le32_to_cpu(header->header.ucode_size_bytes);

        entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
        entry->id = (uint16_t)fw_type;
        entry->image_addr_high = upper_32_bits(gpu_addr);
        entry->image_addr_low = lower_32_bits(gpu_addr);
        entry->meta_data_addr_high = 0;
        entry->meta_data_addr_low = 0;
        entry->data_size_byte = data_size;
        entry->num_register_entries = 0;
        entry->flags = 0;

        return 0;
}

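/*
 * Build the firmware TOC in the driver's VRAM buffer, hand its GPU
 * address to the SMU and then ask the SMU to load the selected ucodes.
 */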
static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
{
        struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
        struct SMU_DRAMData_TOC *toc;
        uint32_t fw_to_load;

        toc = (struct SMU_DRAMData_TOC *)private->header;
        toc->num_entries = 0;
        toc->structure_version = 1;

        if (!adev->firmware.smu_load)
                return 0;

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for RLC\n");
                return -EINVAL;
        }

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for CE\n");
                return -EINVAL;
        }

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for PFP\n");
                return -EINVAL;
        }

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for ME\n");
                return -EINVAL;
        }

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for MEC\n");
                return -EINVAL;
        }

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
                return -EINVAL;
        }

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for SDMA0\n");
                return -EINVAL;
        }

        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for SDMA1\n");
                return -EINVAL;
        }

        iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
        iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);

        fw_to_load = UCODE_ID_RLC_G_MASK |
                        UCODE_ID_SDMA0_MASK |
                        UCODE_ID_SDMA1_MASK |
                        UCODE_ID_CP_CE_MASK |
                        UCODE_ID_CP_ME_MASK |
                        UCODE_ID_CP_PFP_MASK |
                        UCODE_ID_CP_MEC_MASK |
                        UCODE_ID_CP_MEC_JT1_MASK;

        if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
                DRM_ERROR("Failed to request SMU to load ucode\n");
                return -EINVAL;
        }

        return 0;
}

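/*
 * Wait for the SMU to report, via SOFT_REGISTERS_TABLE_27, that the
 * requested firmware type has finished loading.
 */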
static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
                                            uint32_t fw_type)
{
        uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
        int i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
                        break;
                udelay(1);
        }

        if (i == adev->usec_timeout) {
                DRM_ERROR("check firmware loading failed\n");
                return -EINVAL;
        }

        return 0;
}

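/*
 * Upload the SMC firmware, start the SMC and then ask it to load the
 * remaining ucodes.
 */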
int iceland_smu_start(struct amdgpu_device *adev)
{
        int result;

        result = iceland_smu_upload_firmware_image(adev);
        if (result)
                return result;

        result = iceland_smu_start_smc(adev);
        if (result)
                return result;

        return iceland_smu_request_load_fw(adev);
}

static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
        .check_fw_load_finish = iceland_smu_check_fw_load_finish,
        .request_smu_load_fw = NULL,
        .request_smu_specific_fw = NULL,
};

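/*
 * Allocate the SMU private data and the TOC buffer in VRAM, pin and map
 * it, and record its GPU address for iceland_smu_request_load_fw().
 */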
int iceland_smu_init(struct amdgpu_device *adev)
{
        struct iceland_smu_private_data *private;
        uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
        struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
        uint64_t mc_addr;
        void *toc_buf_ptr;
        int ret;

        private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
        if (NULL == private)
                return -ENOMEM;

        /* allocate firmware buffers */
        if (adev->firmware.smu_load)
                amdgpu_ucode_init_bo(adev);

        adev->smu.priv = private;
        adev->smu.fw_flags = 0;

        /* Allocate FW image data structure and header buffer */
        ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
                               true, AMDGPU_GEM_DOMAIN_VRAM,
                               AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                               NULL, NULL, toc_buf);
        if (ret) {
                DRM_ERROR("Failed to allocate memory for TOC buffer\n");
                return -ENOMEM;
        }

        /* Retrieve GPU address for header buffer and internal buffer */
        ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
        if (ret) {
                amdgpu_bo_unref(&adev->smu.toc_buf);
                DRM_ERROR("Failed to reserve the TOC buffer\n");
                return -EINVAL;
        }

        ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
        if (ret) {
                amdgpu_bo_unreserve(adev->smu.toc_buf);
                amdgpu_bo_unref(&adev->smu.toc_buf);
                DRM_ERROR("Failed to pin the TOC buffer\n");
                return -EINVAL;
        }

        ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
        if (ret) {
                amdgpu_bo_unreserve(adev->smu.toc_buf);
                amdgpu_bo_unref(&adev->smu.toc_buf);
                DRM_ERROR("Failed to map the TOC buffer\n");
                return -EINVAL;
        }

        amdgpu_bo_unreserve(adev->smu.toc_buf);

        private->header_addr_low = lower_32_bits(mc_addr);
        private->header_addr_high = upper_32_bits(mc_addr);
        private->header = toc_buf_ptr;

        adev->smu.smumgr_funcs = &iceland_smumgr_funcs;

        return 0;
}

int iceland_smu_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_unref(&adev->smu.toc_buf);
        kfree(adev->smu.priv);
        adev->smu.priv = NULL;

        if (adev->firmware.fw_buf)
                amdgpu_ucode_fini_bo(adev);

        return 0;
}