/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <linux/delay.h>

#include "ppatomctrl.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "smu7_hwmgr.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1   625
#define VOLTAGE_VID_OFFSET_SCALE2   100
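/*
 * SVI2 voltage offsets come in as VID codes, where one code step is
 * 6.25 mV.  Multiplying by SCALE1 and dividing by SCALE2 (625/100)
 * converts a code to millivolts, e.g. a VID offset of 4 -> 4 * 625 / 100
 * = 25 mV.  (Interpretation inferred from how these scales are used
 * elsewhere in powerplay; not documented in this file.)
 */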
#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT    28
#define MC_SEQ_MISC0_GDDR5_MASK     0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE    5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)
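/*
 * Clocks in this driver are expressed in 10 kHz units, so PCIE_BUS_CLK
 * (10000) is 100 MHz and TCLK is 10 MHz.  smu7_get_xclk() below returns
 * TCLK when the XCLK input is muxed to TCLK.
 */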
/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};
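/*
 * Every SMU7 power state carries this magic value, so the cast helpers
 * below can reject pp_hw_power_state objects that were not created by
 * this hwmgr.
 */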
static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
struct smu7_power_state *cast_phw_smu7_power_state(
				struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (struct smu7_power_state *)hw_ps;
}
const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (const struct smu7_power_state *)hw_ps;
}
/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}
uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return (uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE);
}
int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
	uint32_t link_width;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

	PP_ASSERT_WITH_CODE((7 >= link_width),
			"Invalid PCIe lane width!", return 0);

	return decode_pcie_lane_width(link_width);
}
/**
 * Enable voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always PP_Result_OK
 */
int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);

	return 0;
}
/**
 * Checks if we want to support voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *data =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}
/**
 * Enable voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}
static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.", return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}
/**
 * Create Voltage Tables.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result = 0;
	uint32_t tmp;
	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
					hwmgr->dyn_state.mvdd_dependency_on_mclk);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependency table.",
				return result);
	}
	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
					hwmgr->dyn_state.vddci_dependency_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependency table.",
				return result);
	}
	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* VDDGFX has only SVI2 voltage control */
		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
				table_info->vddgfx_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDGFX table from lookup table.",
				return result);
	}
	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
				&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDC table.", return result;);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

		if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
					hwmgr->dyn_state.vddc_dependency_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
					table_info->vddc_lookup_table);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDC table from dependency table.",
				return result;);
	}
	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= tmp),
		"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddc_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
		"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
		"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
		"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->mvdd_voltage_table)));

	return 0;
}
/**
 * Programs static screen detection parameters
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}
/**
 * Setup display gap for glitch free memory clock switching.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}
/**
 * Programs activity state transition voting clients
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);

	return 0;
}
static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_0, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_1, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_2, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_3, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_4, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_5, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_6, 0);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_FREQ_TRAN_VOTING_7, 0);

	return 0;
}
/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}
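	/*
	 * The low nibble of MC_CG_CONFIG appears to enable arbiter set
	 * requests on the memory channels; writing arb_dest to
	 * MC_ARB_CG.CG_ARB_REQ then asks the MC arbiter to make the
	 * destination set the active one (inferred from register usage).
	 */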
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
}
/**
 * Initial switch from ARB F0->F1
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 * This function is to be called from the SetPowerState table.
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t tmp;

	tmp = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
			0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr,
			tmp, MC_CG_ARB_FREQ_F0);
}
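/*
 * Build the default PCIe DPM table.  When the pptable provides PCIe
 * levels, entry 0 of that table is skipped and one slot past the end is
 * kept free: it is populated at the bottom with the boot level without
 * being counted (the "A+A PSPP" workaround mentioned below).  Otherwise
 * a six-level table is hardcoded from the reported gen/lane caps.
 */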
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
			tmp,
			MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If PCIE table from PPTable have ULV entry + 8 entries,
		 * then ignore the last entry.
		 */
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
			data->dpm_table.pcie_speed_table.count,
			get_pcie_gen_support(data->pcie_gen_cap,
					PP_Min_PCIEGen),
			get_pcie_lane_support(data->pcie_lane_cap,
					PP_Max_PCIELane));

	return 0;
}
static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_GRAPHICS),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_VDDC),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
			smum_get_mac_definition(hwmgr->smumgr,
					SMU_MAX_LEVELS_MVDD),
			MAX_REGULAR_DPM_NUMBER);

	return 0;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table is empty. This table is mandatory", return -EINVAL);
	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
			data->dpm_table.sclk_table.count++;
		}
	}
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
				allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
			data->dpm_table.mclk_table.count++;
		}
	}
	/* Initialize Vddc DPM table based on allowed Vddc values and populate corresponding std values. */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_sclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}
	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize MVDD DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}
static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0.",
			return -EINVAL);
	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
						dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0);
			data->dpm_table.sclk_table.count++;
		}
	}
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
						dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
							dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
							(i == 0);
			data->dpm_table.mclk_table.count++;
		}
	}

	return 0;
}
int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));

	return 0;
}
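/*
 * Return the reference clock ("xclk") in 10 kHz units: TCLK when the
 * XCLK input is muxed to TCLK, otherwise the VBIOS reference clock,
 * divided by 4 when XTALIN_DIVIDE is set.
 */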
uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
	uint32_t reference_clock, tmp;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);

	if (tmp)
		return TCLK;

	cgs_get_active_displays_info(hwmgr->device, &info);
	reference_clock = mode_info.ref_clock;

	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);

	if (0 != tmp)
		return reference_clock / 4;

	return reference_clock;
}
static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot))
		return smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableVRHotGPIOInterrupt);

	return 0;
}
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);

	return 0;
}
static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);

	return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);

	return 0;
}
static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}
static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}
static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr->smumgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
				SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);

	return 0;
}
static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled)
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -EINVAL);

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -EINVAL);

		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
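		/*
		 * Bring up the local CAC (power measurement) blocks for
		 * the two MC channels and the CPL block: an init value
		 * first, then the operating value after a short settle
		 * delay (values taken as-is from the bring-up sequence).
		 */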
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
		udelay(10);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
	}

	return 0;
}
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	PP_ASSERT_WITH_CODE(
			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
					PPSMC_MSG_Voltage_Cntl_Enable)),
			"Failed to enable voltage DPM during DPM Start Function!",
			return -EINVAL);
	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!\n");
		return -EINVAL;
	}
	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable SCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
	}

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable MCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
	}

	return 0;
}
static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);

	return 0;
}
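/*
 * Map the set of active auto-throttle sources onto the single
 * CG_THERMAL_CTRL::DPM_EVENT_SRC selector and enable or disable thermal
 * protection accordingly.  Only the thermal (internal digital sensor)
 * and external sources are wired up here.
 */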
static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		pr_err("Unknown throttling event sources.\n");
		/* fall through */
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source. */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}
static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!(data->active_auto_throttle_sources & (1 << source))) {
		data->active_auto_throttle_sources |= 1 << source;
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}

	return 0;
}
static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->active_auto_throttle_sources & (1 << source)) {
		data->active_auto_throttle_sources &= ~(1 << source);
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}

	return 0;
}
static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->pcie_performance_request = true;

	return 0;
}
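/*
 * Full DPM bring-up sequence: voltage control and voltage tables first,
 * then the static SMC-side setup (screen threshold, display gap, voting
 * clients, firmware header, ARB F0->F1), followed by the SMC table
 * upload and finally the dynamic features (ULV, deep sleep, DIDT, CAC,
 * power containment, thermal throttling).  Individual failures are
 * collected in 'result' so the sequence runs to completion.
 */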
int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is already running right now, no need to enable DPM!",
			return 0);

	if (smu7_voltage_control(hwmgr)) {
		tmp_result = smu7_enable_voltage_control(hwmgr);
		PP_ASSERT_WITH_CODE(tmp_result == 0,
				"Failed to enable voltage control!",
				result = tmp_result);

		tmp_result = smu7_construct_voltage_tables(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to construct voltage tables!",
				result = tmp_result);
	}
	smum_initialize_mc_reg_table(hwmgr);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program static screen threshold parameters!",
			result = tmp_result);

	tmp_result = smu7_enable_display_gap(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable display gap!", result = tmp_result);

	tmp_result = smu7_program_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program voting clients!", result = tmp_result);

	tmp_result = smum_process_firmware_header(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to process firmware header!", result = tmp_result);

	tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to initialize switch from ArbF0 to F1!",
			result = tmp_result);

	result = smu7_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	tmp_result = smum_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to initialize SMC table!", result = tmp_result);

	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);

	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);

	tmp_result = smu7_enable_sclk_control(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SCLK control!", result = tmp_result);

	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable voltage control!", result = tmp_result);

	tmp_result = smu7_enable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ULV!", result = tmp_result);

	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_enable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to enable DIDT config!", result = tmp_result);
	tmp_result = smu7_start_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to start DPM!", result = tmp_result);

	tmp_result = smu7_enable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SMC CAC!", result = tmp_result);

	tmp_result = smu7_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable power containment!", result = tmp_result);

	tmp_result = smu7_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to set power control level!", result = tmp_result);

	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_pcie_performance_request(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"pcie performance request failed!", result = tmp_result);

	return result;
}
int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
	PP_ASSERT_WITH_CODE(tmp_result == 0,
			"DPM is not running right now, no need to disable DPM!",
			return 0);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
				"Failed to disable AVFS!",
				return -EINVAL);
	}
	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force switch to ArbF0!", result = tmp_result);

	return result;
}

int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;

	/* need to set voltage control types before EVV patching */
	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;

	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;

	data->fast_watermark_threshold = 100;
	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2))
			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDGFX);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);
	if ((hwmgr->pp_table_version != PP_TABLE_V0)
			&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);
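	/*
	 * Seed the PCIe gen/lane ranges with their opposite extremes
	 * (max with the lowest value, min with the highest) so that the
	 * min/max accumulation done while parsing the power states can
	 * only widen them.
	 */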
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;
}
/**
 * Get Leakage VDDC based on leakage ID.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint16_t vddc = 0;
	uint16_t vddgfx = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;

	if (table_info == NULL)
		return -EINVAL;

	sclk_table = table_info->vdd_dep_on_sclk;
	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
			if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ClockStretcher)) {
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}
				if (0 == atomctrl_get_voltage_evv_on_sclk
				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
				     vv_id, &vddgfx)) {
					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);

					/* the voltage should not be zero nor equal to leakage ID */
					if (vddgfx != 0 && vddgfx != vv_id) {
						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
						data->vddcgfx_leakage.count++;
					}
				} else {
					pr_err("Error retrieving EVV voltage value!\n");
				}
			}
		} else {
			if ((hwmgr->pp_table_version == PP_TABLE_V0)
					|| !phm_get_sclk_for_voltage_evv(hwmgr,
						table_info->vddc_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ClockStretcher)) {
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}

				if (phm_get_voltage_evv_on_sclk(hwmgr,
							VOLTAGE_TYPE_VDDC,
							sclk, vv_id, &vddc) == 0) {
					if (vddc >= 2000 || vddc == 0)
						return -EINVAL;
				} else {
					pr_warn("failed to retrieve EVV voltage!\n");
					continue;
				}

				/* the voltage should not be zero nor equal to leakage ID */
				if (vddc != 0 && vddc != vv_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}
/**
 * Change virtual leakage voltage to actual value.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    voltage  pointer to the voltage to be patched
 * @param    leakage_table  pointer to the leakage table
 */
static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
		uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
{
	uint32_t index;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
	for (index = 0; index < leakage_table->count; index++) {
		/* if this voltage matches a leakage voltage ID,
		 * patch with actual leakage voltage
		 */
		if (leakage_table->leakage_id[index] == *voltage) {
			*voltage = leakage_table->actual_voltage[index];
			break;
		}
	}

	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
}
/**
 * Patch voltage lookup table by EVV leakages.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    lookup_table  pointer to the voltage lookup table
 * @param    leakage_table  pointer to the leakage table
 * @return   always 0
 */
static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_voltage_lookup_table *lookup_table,
		struct smu7_leakage_voltage *leakage_table)
{
	uint32_t i;

	for (i = 0; i < lookup_table->count; i++)
		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
				&lookup_table->entries[i].us_vdd, leakage_table);

	return 0;
}
static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
		struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
		uint32_t *vddc)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
			table_info->max_clock_voltage_on_dc.vddc;

	return 0;
}
static int smu7_patch_voltage_dependency_tables_with_lookup_table(
		struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			voltage_id = sclk_table->entries[entry_id].vddInd;
			sclk_table->entries[entry_id].vddgfx =
				table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
		}
	} else {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			voltage_id = sclk_table->entries[entry_id].vddInd;
			sclk_table->entries[entry_id].vddc =
				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
		}
	}

	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
		voltage_id = mclk_table->entries[entry_id].vddInd;
		mclk_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
		voltage_id = mm_table->entries[entry_id].vddcInd;
		mm_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	return 0;
}
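/*
 * Insert a calculated voltage record into a lookup table, reusing an
 * existing entry when one with the same us_vdd is already present: a
 * previously calculated duplicate is left untouched, a table-sourced
 * duplicate is overwritten and marked calculated, and only a genuinely
 * new voltage grows the table.
 */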
static int phm_add_voltage(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_voltage_lookup_table *look_up_table,
		phm_ppt_v1_voltage_lookup_record *record)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != look_up_table),
		"Lookup Table empty.", return -EINVAL);
	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
		"Lookup Table empty.", return -EINVAL);

	i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
		"Lookup Table is full.", return -EINVAL);

	/* This is to avoid entering duplicate calculated records. */
	for (i = 0; i < look_up_table->count; i++) {
		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
			if (look_up_table->entries[i].us_calculated == 1)
				return 0;
			break;
		}
	}

	look_up_table->entries[i].us_calculated = 1;
	look_up_table->entries[i].us_vdd = record->us_vdd;
	look_up_table->entries[i].us_cac_low = record->us_cac_low;
	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
	look_up_table->entries[i].us_cac_high = record->us_cac_high;
	/* Only increment the count when we're appending, not replacing a duplicate entry. */
	if (i == look_up_table->count)
		look_up_table->count++;

	return 0;
}
static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id;
	struct phm_ppt_v1_voltage_lookup_record v_record;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;

	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
					sclk_table->entries[entry_id].vdd_offset;

			sclk_table->entries[entry_id].vddc =
				v_record.us_cac_low = v_record.us_cac_mid =
				v_record.us_cac_high = v_record.us_vdd;

			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
		}
		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
			else
				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
					mclk_table->entries[entry_id].vdd_offset;

			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}

	return 0;
}
1723 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1726 struct phm_ppt_v1_voltage_lookup_record v_record;
1727 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1728 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1729 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1731 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
			else
				v_record.us_vdd = mm_table->entries[entry_id].vddc +
					mm_table->entries[entry_id].vddgfx_offset;

			/* Add the calculated VDDGFX to the VDDGFX lookup table */
			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
		}
	}

	return 0;
}
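/*
 * Sort a voltage lookup table into ascending us_vdd order. This is a
 * plain insertion sort; the tables only ever hold a handful of entries,
 * so the O(n^2) behaviour is irrelevant here.
 */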
1749 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1750 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1752 uint32_t table_size, i, j;
1753 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1754 table_size = lookup_table->count;
1756 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1757 "Lookup table is empty", return -EINVAL);
1759 /* Sorting voltages */
1760 for (i = 0; i < table_size - 1; i++) {
1761 for (j = i + 1; j > 0; j--) {
1762 if (lookup_table->entries[j].us_vdd <
1763 lookup_table->entries[j - 1].us_vdd) {
1764 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
1765 lookup_table->entries[j - 1] = lookup_table->entries[j];
				lookup_table->entries[j] = tmp_voltage_lookup_record;
			}
		}
	}

	return 0;
}
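/*
 * Run the complete table fix-up pipeline once the pptable has been
 * parsed: patch leakage IDs in the lookup tables and DC limits, fold the
 * lookup tables back into the dependency tables, recompute the derived
 * VDDC/VDDGFX records, then sort both lookup tables. Errors are
 * accumulated in 'result' so that every stage still gets to run.
 */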
1774 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int tmp_result;
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1779 struct phm_ppt_v1_information *table_info =
1780 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1782 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1783 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1784 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
1785 if (tmp_result != 0)
1786 result = tmp_result;
1788 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1789 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
1792 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1793 table_info->vddc_lookup_table, &(data->vddc_leakage));
	if (tmp_result)
		result = tmp_result;
1797 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
1798 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
	if (tmp_result)
		result = tmp_result;
1803 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result)
		result = tmp_result;
1807 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
	if (tmp_result)
		result = tmp_result;
1811 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
	if (tmp_result)
		result = tmp_result;
1815 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
	if (tmp_result)
		result = tmp_result;
1819 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
	if (tmp_result)
		result = tmp_result;

	return result;
}
1826 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
1828 struct phm_ppt_v1_information *table_info =
1829 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1831 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
1832 table_info->vdd_dep_on_sclk;
1833 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
1834 table_info->vdd_dep_on_mclk;
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
		"VDD dependency on SCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
		"VDD dependency on SCLK table must have at least one entry.",
		return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
		"VDD dependency on MCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
		"VDD dependency on MCLK table must have at least one entry.",
		return -EINVAL);
1850 table_info->max_clock_voltage_on_ac.sclk =
1851 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
1852 table_info->max_clock_voltage_on_ac.mclk =
1853 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
1854 table_info->max_clock_voltage_on_ac.vddc =
1855 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
1856 table_info->max_clock_voltage_on_ac.vddci =
1857 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
1859 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
1860 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
1861 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;

	return 0;
}
1867 int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
1869 struct phm_ppt_v1_information *table_info =
1870 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1871 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
1872 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
1875 struct cgs_system_info sys_info = {0};
	if (table_info != NULL) {
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		lookup_table = table_info->vddc_lookup_table;
	} else
		return 0;
1883 sys_info.size = sizeof(struct cgs_system_info);
1885 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
1886 cgs_query_system_info(hwmgr->device, &sys_info);
1887 hw_revision = (uint32_t)sys_info.value;
1889 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
1890 cgs_query_system_info(hwmgr->device, &sys_info);
1891 sub_sys_id = (uint32_t)sys_info.value;
1893 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
1894 cgs_query_system_info(hwmgr->device, &sys_info);
1895 sub_vendor_id = (uint32_t)sys_info.value;
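	/*
	 * Board-specific workaround: on a few Polaris10 (hw revision 0xC7)
	 * designs, identified by PCI subsystem IDs, the top memory DPM level
	 * is pinned to a real (non-leakage) voltage of at least 1000 mV. If
	 * the referenced lookup entry already satisfies that, nothing is
	 * changed; otherwise vddInd is repointed at the first such entry.
	 */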
1897 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
1898 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
1899 (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
1900 (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}

	return 0;
}
1914 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t temp_reg;
1918 struct phm_ppt_v1_information *table_info =
1919 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1922 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
1923 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			PP_ASSERT_WITH_CODE(0,
				"Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
				);
			break;
		}
	if (table_info == NULL)
		return 0;
1952 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
1953 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
1954 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
1955 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
1957 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
1958 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
1960 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
1962 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
1964 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
1965 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
1967 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
1969 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
1970 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
1972 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
1973 table_info->cac_dtp_table->usOperatingTempStep = 1;
1974 table_info->cac_dtp_table->usOperatingTempHyst = 1;
1976 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
1977 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
1979 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
1980 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
1982 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
1983 table_info->cac_dtp_table->usOperatingTempMinLimit;
1985 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
1986 table_info->cac_dtp_table->usOperatingTempMaxLimit;
1988 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
1989 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
1991 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
1992 table_info->cac_dtp_table->usOperatingTempStep;
1994 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
1995 table_info->cac_dtp_table->usTargetOperatingTemp;
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);
	}

	return 0;
}
/**
 * Change virtual leakage voltage to the actual value.
 *
 * @param hwmgr          the address of the powerplay hardware manager.
 * @param voltage        pointer to the voltage to be patched
 * @param leakage_table  pointer to the leakage table
 */
2010 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
		uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
{
	uint32_t index;
2015 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2016 for (index = 0; index < leakage_table->count; index++) {
2017 /* if this voltage matches a leakage voltage ID */
2018 /* patch with actual leakage voltage */
2019 if (leakage_table->leakage_id[index] == *voltage) {
			*voltage = leakage_table->actual_voltage[index];
			break;
		}
	}
	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		printk(KERN_ERR "Voltage value looks like a leakage ID but it was not patched\n");
}
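/*
 * The helpers below apply the same leakage patch to every legacy (v0)
 * dependency table: any entry whose voltage is a virtual leakage ID is
 * rewritten in place to the measured voltage.
 */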
static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
			      struct phm_clock_voltage_dependency_table *tab)
{
	uint16_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab)
		for (i = 0; i < tab->count; i++)
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
						&data->vddc_leakage);

	return 0;
}
static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
			       struct phm_clock_voltage_dependency_table *tab)
{
	uint16_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab)
		for (i = 0; i < tab->count; i++)
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
						&data->vddci_leakage);

	return 0;
}
static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
				  struct phm_vce_clock_voltage_dependency_table *tab)
{
	uint16_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab)
		for (i = 0; i < tab->count; i++)
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
						&data->vddc_leakage);

	return 0;
}
static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
				  struct phm_uvd_clock_voltage_dependency_table *tab)
{
	uint16_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab)
		for (i = 0; i < tab->count; i++)
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
						&data->vddc_leakage);

	return 0;
}
static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
					 struct phm_phase_shedding_limits_table *tab)
{
	uint16_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab)
		for (i = 0; i < tab->count; i++)
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
						&data->vddc_leakage);

	return 0;
}
static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
				   struct phm_samu_clock_voltage_dependency_table *tab)
{
	uint16_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab)
		for (i = 0; i < tab->count; i++)
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
						&data->vddc_leakage);

	return 0;
}
static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
				  struct phm_acp_clock_voltage_dependency_table *tab)
{
	uint16_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab)
		for (i = 0; i < tab->count; i++)
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
						&data->vddc_leakage);

	return 0;
}
static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
				     struct phm_clock_and_voltage_limits *tab)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab) {
		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc,
					&data->vddc_leakage);
		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci,
					&data->vddci_leakage);
	}

	return 0;
}
static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
{
	uint32_t i;
	uint32_t vddc;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (tab) {
		for (i = 0; i < tab->count; i++) {
			vddc = (uint32_t)(tab->entries[i].Vddc);
			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
			tab->entries[i].Vddc = (uint16_t)vddc;
		}
	}

	return 0;
}
static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
{
	int tmp;

	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
	if (tmp)
		return -EINVAL;
	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
	if (tmp)
		return -EINVAL;

	return 0;
}
2217 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2219 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2221 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2222 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2223 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
		"VDDC dependency on SCLK table is missing. This table is mandatory.\n", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
		"VDDC dependency on SCLK table must have at least one entry. This table is mandatory.\n", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
		"VDDC dependency on MCLK table is missing. This table is mandatory.\n", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
		"VDDC dependency on MCLK table must have at least one entry. This table is mandatory.\n", return -EINVAL);
2235 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2236 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2238 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2239 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2240 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2241 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2242 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2243 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2245 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2246 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2247 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2250 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
		hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;

	return 0;
}
2256 int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data;
	int result = 0;
	data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;
2267 smu7_patch_voltage_workaround(hwmgr);
2268 smu7_init_dpm_defaults(hwmgr);
2270 /* Get leakage voltage based on leakage ID. */
	result = smu7_get_evv_voltages(hwmgr);

	if (result) {
		printk(KERN_ERR "Get EVV voltages failed. Abort driver loading!\n");
		return -EINVAL;
	}
2278 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2279 smu7_complete_dependency_tables(hwmgr);
2280 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2281 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2282 smu7_patch_dependency_tables_with_leakage(hwmgr);
2283 smu7_set_private_data_based_on_pptable_v0(hwmgr);
	/* Initialize Dynamic State Adjustment Rule Settings */
	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);

	if (0 == result) {
		struct cgs_system_info sys_info = {0};
2292 data->is_tlu_enabled = false;
2294 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2295 SMU7_MAX_HARDWARE_POWERLEVELS;
2296 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2297 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2299 sys_info.size = sizeof(struct cgs_system_info);
2300 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
2301 result = cgs_query_system_info(hwmgr->device, &sys_info);
		if (result)
			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		else
			data->pcie_gen_cap = (uint32_t)sys_info.value;
2306 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2307 data->pcie_spc_cap = 20;
2308 sys_info.size = sizeof(struct cgs_system_info);
2309 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
2310 result = cgs_query_system_info(hwmgr->device, &sys_info);
		if (result)
			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		else
			data->pcie_lane_cap = (uint32_t)sys_info.value;
2316 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2317 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2318 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2319 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2320 smu7_thermal_parameter_init(hwmgr);
	} else {
		/* Ignore the return value here; we are cleaning up a mess. */
		phm_hwmgr_backend_fini(hwmgr);
	}

	return 0;
}
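/*
 * Force the highest enabled DPM level in each domain. The level index is
 * recovered from the enable mask by shifting it right until it is empty,
 * which yields the position of the most significant set bit: a mask of
 * 0b10110, for example, gives level 4.
 */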
2329 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2331 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2332 uint32_t level, tmp;
	if (!data->pcie_dpm_key_disabled) {
		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				level++;

			if (level)
				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
						PPSMC_MSG_PCIeDPM_ForceLevel, level);
		}
	}

	if (!data->sclk_dpm_key_disabled) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;

			if (level)
				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
						PPSMC_MSG_SCLKDPM_SetEnabledMask,
						(1 << level));
		}
	}

	if (!data->mclk_dpm_key_disabled) {
		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;

			if (level)
				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
						PPSMC_MSG_MCLKDPM_SetEnabledMask,
						(1 << level));
		}
	}

	return 0;
}
2378 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2380 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2382 if (hwmgr->pp_table_version == PP_TABLE_V1)
2383 phm_apply_dal_min_voltage_request(hwmgr);
	/* TODO: for v0 Iceland and CI */
2386 if (!data->sclk_dpm_key_disabled) {
2387 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2388 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2389 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2390 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2393 if (!data->mclk_dpm_key_disabled) {
2394 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2395 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2396 PPSMC_MSG_MCLKDPM_SetEnabledMask,
				data->dpm_level_enable_mask.mclk_dpm_enable_mask);
	}

	return 0;
}
2403 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2405 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	if (!smum_is_dpm_running(hwmgr))
		return -EINVAL;
2410 if (!data->pcie_dpm_key_disabled) {
2411 smum_send_msg_to_smc(hwmgr->smumgr,
2412 PPSMC_MSG_PCIeDPM_UnForceLevel);
2415 return smu7_upload_dpm_level_enable_mask(hwmgr);
2418 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data =
			(struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t level;
2424 if (!data->sclk_dpm_key_disabled)
2425 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2426 level = phm_get_lowest_enabled_level(hwmgr,
2427 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2428 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							(1 << level));
		}
2434 if (!data->mclk_dpm_key_disabled) {
2435 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2436 level = phm_get_lowest_enabled_level(hwmgr,
2437 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2438 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
							PPSMC_MSG_MCLKDPM_SetEnabledMask,
							(1 << level));
		}
	}
2444 if (!data->pcie_dpm_key_disabled) {
2445 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2446 level = phm_get_lowest_enabled_level(hwmgr,
2447 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2448 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
							PPSMC_MSG_PCIeDPM_ForceLevel,
							(level));
		}
	}

	return 0;
}
2457 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2458 enum amd_dpm_forced_level level)
{
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu7_force_dpm_highest(hwmgr);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu7_force_dpm_lowest(hwmgr);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = smu7_unforce_dpm_levels(hwmgr);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	hwmgr->dpm_level = level;

	return ret;
}
2487 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu7_power_state);
}
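/*
 * Clamp a requested power state against the current operating limits
 * (AC/DC power source, active displays, arbiter requests, stable
 * P-state) before it is programmed. VI parts carry exactly two
 * performance levels per state; level 0 is pulled up to the minimum
 * clocks and level 1 must never drop below level 0.
 */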
2493 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2494 struct pp_power_state *request_ps,
2495 const struct pp_power_state *current_ps)
2498 struct smu7_power_state *smu7_ps =
2499 cast_phw_smu7_power_state(&request_ps->hardware);
	uint32_t sclk, mclk;
	struct PP_Clocks minimum_clocks = {0};
2503 bool disable_mclk_switching;
2504 bool disable_mclk_switching_for_frame_lock;
2505 struct cgs_display_info info = {0};
2506 const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2509 struct phm_ppt_v1_information *table_info =
2510 (struct phm_ppt_v1_information *)(hwmgr->pptable);
	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2514 data->battery_state = (PP_StateUILabel_Battery ==
2515 request_ps->classification.ui_label);
2517 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2518 "VI should always have 2 performance levels",
2521 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2522 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2523 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2525 /* Cap clock DPM tables at DC MAX if it is in DC. */
2526 if (PP_PowerSource_DC == hwmgr->power_source) {
2527 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2528 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2529 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2530 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2531 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2535 smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
2536 smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
2538 cgs_get_active_displays_info(hwmgr->device, &info);
	/* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
2542 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2543 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
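	/*
	 * Stable P-state (used e.g. for profiling): pin SCLK to roughly 75 %
	 * of the AC maximum, rounded down to the nearest entry in the SCLK
	 * dependency table, and pin MCLK to the AC maximum.
	 */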
2545 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2546 PHM_PlatformCaps_StablePState)) {
2547 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2548 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2550 for (count = table_info->vdd_dep_on_sclk->count - 1;
2551 count >= 0; count--) {
2552 if (stable_pstate_sclk >=
2553 table_info->vdd_dep_on_sclk->entries[count].clk) {
2554 stable_pstate_sclk =
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2563 stable_pstate_mclk = max_limits->mclk;
2565 minimum_clocks.engineClock = stable_pstate_sclk;
2566 minimum_clocks.memoryClock = stable_pstate_mclk;
2569 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
2570 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
2572 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
2573 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
2575 smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
2577 if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
2578 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
2579 hwmgr->platform_descriptor.overdriveLimit.engineClock),
2580 "Overdrive sclk exceeds limit",
2581 hwmgr->gfx_arbiter.sclk_over_drive =
2582 hwmgr->platform_descriptor.overdriveLimit.engineClock);
2584 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
2585 smu7_ps->performance_levels[1].engine_clock =
2586 hwmgr->gfx_arbiter.sclk_over_drive;
2589 if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
2590 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
2591 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
2592 "Overdrive mclk exceeds limit",
2593 hwmgr->gfx_arbiter.mclk_over_drive =
2594 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
2596 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
2597 smu7_ps->performance_levels[1].memory_clock =
2598 hwmgr->gfx_arbiter.mclk_over_drive;
2601 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2602 hwmgr->platform_descriptor.platformCaps,
2603 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
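	/*
	 * MCLK can only be switched glitch-free while the (single) display is
	 * in vblank, so memory clock switching is disabled whenever more than
	 * one display is active or frame lock is in use.
	 */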
2606 disable_mclk_switching = (1 < info.display_count) ||
2607 disable_mclk_switching_for_frame_lock;
2609 sclk = smu7_ps->performance_levels[0].engine_clock;
2610 mclk = smu7_ps->performance_levels[0].memory_clock;
2612 if (disable_mclk_switching)
2613 mclk = smu7_ps->performance_levels
2614 [smu7_ps->performance_level_count - 1].memory_clock;
2616 if (sclk < minimum_clocks.engineClock)
2617 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2618 max_limits->sclk : minimum_clocks.engineClock;
2620 if (mclk < minimum_clocks.memoryClock)
2621 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2622 max_limits->mclk : minimum_clocks.memoryClock;
2624 smu7_ps->performance_levels[0].engine_clock = sclk;
2625 smu7_ps->performance_levels[0].memory_clock = mclk;
2627 smu7_ps->performance_levels[1].engine_clock =
2628 (smu7_ps->performance_levels[1].engine_clock >=
2629 smu7_ps->performance_levels[0].engine_clock) ?
2630 smu7_ps->performance_levels[1].engine_clock :
2631 smu7_ps->performance_levels[0].engine_clock;
2633 if (disable_mclk_switching) {
2634 if (mclk < smu7_ps->performance_levels[1].memory_clock)
2635 mclk = smu7_ps->performance_levels[1].memory_clock;
2637 smu7_ps->performance_levels[0].memory_clock = mclk;
2638 smu7_ps->performance_levels[1].memory_clock = mclk;
2640 if (smu7_ps->performance_levels[1].memory_clock <
2641 smu7_ps->performance_levels[0].memory_clock)
2642 smu7_ps->performance_levels[1].memory_clock =
2643 smu7_ps->performance_levels[0].memory_clock;
2646 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2647 PHM_PlatformCaps_StablePState)) {
2648 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2649 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
2650 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
2651 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/* The original assigned pcie_gen_performance.max here,
			 * which looks like a copy-paste slip. */
			smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
		}
	}

	return 0;
}
2659 static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (hwmgr == NULL)
		return -EINVAL;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	if (low)
		return smu7_ps->performance_levels[0].memory_clock;
	else
		return smu7_ps->performance_levels
				[smu7_ps->performance_level_count - 1].memory_clock;
}
2681 static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (hwmgr == NULL)
		return -EINVAL;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	if (low)
		return smu7_ps->performance_levels[0].engine_clock;
	else
		return smu7_ps->performance_levels
				[smu7_ps->performance_level_count - 1].engine_clock;
}
2703 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
2704 struct pp_hw_power_state *hw_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
2713 /* First retrieve the Boot clocks and VDDC from the firmware info table.
2714 * We assume here that fw_info is unchanged if this call fails.
2716 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
2717 hwmgr->device, index,
2718 &size, &frev, &crev);
	/* During a test, there is no firmware info table. */
	if (!fw_info)
		return 0;

	/* Patch the state. */
2724 data->vbios_boot_state.sclk_bootup_value =
2725 le32_to_cpu(fw_info->ulDefaultEngineClock);
2726 data->vbios_boot_state.mclk_bootup_value =
2727 le32_to_cpu(fw_info->ulDefaultMemoryClock);
2728 data->vbios_boot_state.mvdd_bootup_value =
2729 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
2730 data->vbios_boot_state.vddc_bootup_value =
2731 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
2732 data->vbios_boot_state.vddci_bootup_value =
2733 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
2734 data->vbios_boot_state.pcie_gen_bootup_value =
2735 smu7_get_current_pcie_speed(hwmgr);
2737 data->vbios_boot_state.pcie_lane_bootup_value =
2738 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
2740 /* set boot power state */
2741 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
2742 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
2743 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
2749 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;
2754 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2755 result = pp_tables_get_num_of_entries(hwmgr, &ret);
2756 return result ? 0 : ret;
2757 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
		result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
		return result;
	}

	return 0;
}
2764 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
2765 void *state, struct pp_power_state *power_state,
2766 void *pp_table, uint32_t classification_flag)
2768 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2769 struct smu7_power_state *smu7_power_state =
2770 (struct smu7_power_state *)(&(power_state->hardware));
2771 struct smu7_performance_level *performance_level;
2772 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
2773 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
2774 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
2775 PPTable_Generic_SubTable_Header *sclk_dep_table =
2776 (PPTable_Generic_SubTable_Header *)
2777 (((unsigned long)powerplay_table) +
2778 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
2780 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
2781 (ATOM_Tonga_MCLK_Dependency_Table *)
2782 (((unsigned long)powerplay_table) +
2783 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2785 /* The following fields are not initialized here: id orderedList allStatesList */
2786 power_state->classification.ui_label =
2787 (le16_to_cpu(state_entry->usClassification) &
2788 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2789 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2790 power_state->classification.flags = classification_flag;
2791 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
2793 power_state->classification.temporary_state = false;
2794 power_state->classification.to_be_deleted = false;
2796 power_state->validation.disallowOnDC =
2797 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2798 ATOM_Tonga_DISALLOW_ON_DC));
2800 power_state->pcie.lanes = 0;
2802 power_state->display.disableFrameModulation = false;
2803 power_state->display.limitRefreshrate = false;
2804 power_state->display.enableVariBright =
2805 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2806 ATOM_Tonga_ENABLE_VARIBRIGHT));
2808 power_state->validation.supportedPowerLevels = 0;
2809 power_state->uvd_clocks.VCLK = 0;
2810 power_state->uvd_clocks.DCLK = 0;
2811 power_state->temperatures.min = 0;
2812 power_state->temperatures.max = 0;
2814 performance_level = &(smu7_power_state->performance_levels
2815 [smu7_power_state->performance_level_count++]);
2817 PP_ASSERT_WITH_CODE(
2818 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2819 "Performance levels exceeds SMC limit!",
2822 PP_ASSERT_WITH_CODE(
2823 (smu7_power_state->performance_level_count <=
2824 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2825 "Performance levels exceeds Driver limit!",
2828 /* Performance levels are arranged from low to high. */
2829 performance_level->memory_clock = mclk_dep_table->entries
2830 [state_entry->ucMemoryClockIndexLow].ulMclk;
2831 if (sclk_dep_table->ucRevId == 0)
2832 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2833 [state_entry->ucEngineClockIndexLow].ulSclk;
2834 else if (sclk_dep_table->ucRevId == 1)
2835 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2836 [state_entry->ucEngineClockIndexLow].ulSclk;
2837 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2838 state_entry->ucPCIEGenLow);
2839 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneLow); /* was ucPCIELaneHigh; the low level should use the Low index */
2842 performance_level = &(smu7_power_state->performance_levels
2843 [smu7_power_state->performance_level_count++]);
2844 performance_level->memory_clock = mclk_dep_table->entries
2845 [state_entry->ucMemoryClockIndexHigh].ulMclk;
2847 if (sclk_dep_table->ucRevId == 0)
2848 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2849 [state_entry->ucEngineClockIndexHigh].ulSclk;
2850 else if (sclk_dep_table->ucRevId == 1)
2851 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2852 [state_entry->ucEngineClockIndexHigh].ulSclk;
2854 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2855 state_entry->ucPCIEGenHigh);
2856 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneHigh);

	return 0;
}
2862 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
2863 unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
2867 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2868 struct phm_ppt_v1_information *table_info =
2869 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2870 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
2871 table_info->vdd_dep_on_mclk;
2873 state->hardware.magic = PHM_VIslands_Magic;
2875 ps = (struct smu7_power_state *)(&state->hardware);
2877 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
2878 smu7_get_pp_table_entry_callback_func_v1);
2880 /* This is the earliest time we have all the dependency table and the VBIOS boot state
2881 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as the VBIOS boot state
	 */
2884 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
2885 if (dep_mclk_table->entries[0].clk !=
2886 data->vbios_boot_state.mclk_bootup_value)
2887 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
2888 "does not match VBIOS boot MCLK level");
2889 if (dep_mclk_table->entries[0].vddci !=
2890 data->vbios_boot_state.vddci_bootup_value)
2891 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
2892 "does not match VBIOS boot VDDCI level");
2895 /* set DC compatible flag if this state supports DC */
2896 if (!state->validation.disallowOnDC)
2897 ps->dc_compatible = true;
2899 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
2900 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
2902 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2903 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
2909 case PP_StateUILabel_Performance:
2910 data->use_pcie_performance_levels = true;
2911 for (i = 0; i < ps->performance_level_count; i++) {
2912 if (data->pcie_gen_performance.max <
2913 ps->performance_levels[i].pcie_gen)
2914 data->pcie_gen_performance.max =
2915 ps->performance_levels[i].pcie_gen;
2917 if (data->pcie_gen_performance.min >
2918 ps->performance_levels[i].pcie_gen)
2919 data->pcie_gen_performance.min =
2920 ps->performance_levels[i].pcie_gen;
2922 if (data->pcie_lane_performance.max <
2923 ps->performance_levels[i].pcie_lane)
2924 data->pcie_lane_performance.max =
2925 ps->performance_levels[i].pcie_lane;
2926 if (data->pcie_lane_performance.min >
2927 ps->performance_levels[i].pcie_lane)
2928 data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
2932 case PP_StateUILabel_Battery:
2933 data->use_pcie_power_saving_levels = true;
2935 for (i = 0; i < ps->performance_level_count; i++) {
2936 if (data->pcie_gen_power_saving.max <
2937 ps->performance_levels[i].pcie_gen)
2938 data->pcie_gen_power_saving.max =
2939 ps->performance_levels[i].pcie_gen;
2941 if (data->pcie_gen_power_saving.min >
2942 ps->performance_levels[i].pcie_gen)
2943 data->pcie_gen_power_saving.min =
2944 ps->performance_levels[i].pcie_gen;
2946 if (data->pcie_lane_power_saving.max <
2947 ps->performance_levels[i].pcie_lane)
2948 data->pcie_lane_power_saving.max =
2949 ps->performance_levels[i].pcie_lane;
2951 if (data->pcie_lane_power_saving.min >
2952 ps->performance_levels[i].pcie_lane)
2953 data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
2964 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
2965 struct pp_hw_power_state *power_state,
2966 unsigned int index, const void *clock_info)
2968 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2969 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
2970 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
2971 struct smu7_performance_level *performance_level;
2972 uint32_t engine_clock, memory_clock;
2973 uint16_t pcie_gen_from_bios;
2975 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
2976 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
2978 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
2979 data->highest_mclk = memory_clock;
2981 performance_level = &(ps->performance_levels
2982 [ps->performance_level_count++]);
2984 PP_ASSERT_WITH_CODE(
2985 (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2986 "Performance levels exceeds SMC limit!",
2989 PP_ASSERT_WITH_CODE(
2990 (ps->performance_level_count <=
2991 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2992 "Performance levels exceeds Driver limit!",
2995 /* Performance levels are arranged from low to high. */
2996 performance_level->memory_clock = memory_clock;
2997 performance_level->engine_clock = engine_clock;
2999 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3001 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);

	return 0;
}
3007 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3008 unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
3012 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3013 struct phm_clock_voltage_dependency_table *dep_mclk_table =
3014 hwmgr->dyn_state.vddci_dependency_on_mclk;
3016 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3018 state->hardware.magic = PHM_VIslands_Magic;
3020 ps = (struct smu7_power_state *)(&state->hardware);
3022 result = pp_tables_get_entry(hwmgr, entry_index, state,
3023 smu7_get_pp_table_entry_callback_func_v0);
3026 * This is the earliest time we have all the dependency table
3027 * and the VBIOS boot state as
3028 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
3029 * state if there is only one VDDCI/MCLK level, check if it's
	 * the same as the VBIOS boot state
	 */
3032 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3033 if (dep_mclk_table->entries[0].clk !=
3034 data->vbios_boot_state.mclk_bootup_value)
3035 printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
3036 "does not match VBIOS boot MCLK level");
3037 if (dep_mclk_table->entries[0].v !=
3038 data->vbios_boot_state.vddci_bootup_value)
3039 printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
3040 "does not match VBIOS boot VDDCI level");
3043 /* set DC compatible flag if this state supports DC */
3044 if (!state->validation.disallowOnDC)
3045 ps->dc_compatible = true;
3047 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3048 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3050 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3051 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
3057 case PP_StateUILabel_Performance:
3058 data->use_pcie_performance_levels = true;
3060 for (i = 0; i < ps->performance_level_count; i++) {
3061 if (data->pcie_gen_performance.max <
3062 ps->performance_levels[i].pcie_gen)
3063 data->pcie_gen_performance.max =
3064 ps->performance_levels[i].pcie_gen;
3066 if (data->pcie_gen_performance.min >
3067 ps->performance_levels[i].pcie_gen)
3068 data->pcie_gen_performance.min =
3069 ps->performance_levels[i].pcie_gen;
3071 if (data->pcie_lane_performance.max <
3072 ps->performance_levels[i].pcie_lane)
3073 data->pcie_lane_performance.max =
3074 ps->performance_levels[i].pcie_lane;
3076 if (data->pcie_lane_performance.min >
3077 ps->performance_levels[i].pcie_lane)
3078 data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
3082 case PP_StateUILabel_Battery:
3083 data->use_pcie_power_saving_levels = true;
3085 for (i = 0; i < ps->performance_level_count; i++) {
3086 if (data->pcie_gen_power_saving.max <
3087 ps->performance_levels[i].pcie_gen)
3088 data->pcie_gen_power_saving.max =
3089 ps->performance_levels[i].pcie_gen;
3091 if (data->pcie_gen_power_saving.min >
3092 ps->performance_levels[i].pcie_gen)
3093 data->pcie_gen_power_saving.min =
3094 ps->performance_levels[i].pcie_gen;
3096 if (data->pcie_lane_power_saving.max <
3097 ps->performance_levels[i].pcie_lane)
3098 data->pcie_lane_power_saving.max =
3099 ps->performance_levels[i].pcie_lane;
3101 if (data->pcie_lane_power_saving.min >
3102 ps->performance_levels[i].pcie_lane)
3103 data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
3114 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3115 unsigned long entry_index, struct pp_power_state *state)
{
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);

	return 0;
}
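/*
 * Sensor reads are serviced synchronously: clock queries go through the
 * SMC mailbox (send the message, then read the result from
 * mmSMC_MSG_ARG_0). GPU load is reported in 24.8 fixed point, so the
 * +0x80 / >>8 sequence below rounds to the nearest percent (a raw value
 * of 0x3280, i.e. 50.5, becomes 51) before clamping to 100.
 */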
3125 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*value = sclk;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
		*value = mclk;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
				SMU_SoftRegisters,
				AverageGraphicsActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
		activity_percent += 0x80;
		activity_percent >>= 8;
		*value = activity_percent > 100 ? 100 : activity_percent;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*value = smu7_thermal_get_temperature(hwmgr);
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*value = data->uvd_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*value = data->vce_power_gated ? 0 : 1;
		return 0;
	default:
		return -EINVAL;
	}
}
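/*
 * Compare the requested state's top SCLK/MCLK against the current DPM
 * tables and record what must be re-uploaded to the SMC:
 * DPMTABLE_OD_UPDATE_* when an overdriven clock is no longer in the
 * table, DPMTABLE_UPDATE_* when display timing (deep-sleep clock or
 * display count) changed.
 */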
3166 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3168 const struct phm_set_power_state_input *states =
3169 (const struct phm_set_power_state_input *)input;
3170 const struct smu7_power_state *smu7_ps =
3171 cast_const_phw_smu7_power_state(states->pnew_state);
3172 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3173 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3174 uint32_t sclk = smu7_ps->performance_levels
3175 [smu7_ps->performance_level_count - 1].engine_clock;
3176 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3177 uint32_t mclk = smu7_ps->performance_levels
3178 [smu7_ps->performance_level_count - 1].memory_clock;
3179 struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct cgs_display_info info = {0};
3183 data->need_update_smu7_dpm_table = 0;
3185 for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}
3190 if (i >= sclk_table->count)
3191 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3193 /* TODO: Check SCLK in DAL's minimum clocks
3194 * in case DeepSleep divider update is required.
3196 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
3197 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
3198 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3199 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3202 for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}
3207 if (i >= mclk_table->count)
3208 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3210 cgs_get_active_displays_info(hwmgr->device, &info);
3212 if (data->display_timing.num_existing_displays != info.display_count)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
3218 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3219 const struct smu7_power_state *smu7_ps)
{
	uint32_t i;
	uint32_t sclk, max_sclk = 0;
3223 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3224 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3226 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3227 sclk = smu7_ps->performance_levels[i].engine_clock;
		if (max_sclk < sclk)
			max_sclk = sclk;
	}
3232 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3233 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3234 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3235 dpm_table->pcie_speed_table.dpm_levels
3236 [dpm_table->pcie_speed_table.count - 1].value :
					dpm_table->pcie_speed_table.dpm_levels[i].value);
	}

	return 0;
}
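/*
 * PSPP (PCIe Speed Power Policy): before switching to a faster state,
 * ask the platform via ACPI for a higher PCIe link speed. If the request
 * is denied the forced gen is remembered and retried one step lower;
 * downgrades are deferred until after the state change (see
 * smu7_notify_link_speed_change_after_state_change() below).
 */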
3243 static int smu7_request_link_speed_change_before_state_change(
3244 struct pp_hwmgr *hwmgr, const void *input)
3246 const struct phm_set_power_state_input *states =
3247 (const struct phm_set_power_state_input *)input;
3248 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3249 const struct smu7_power_state *smu7_nps =
3250 cast_const_phw_smu7_power_state(states->pnew_state);
3251 const struct smu7_power_state *polaris10_cps =
3252 cast_const_phw_smu7_power_state(states->pcurrent_state);
3254 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
3255 uint16_t current_link_speed;
3257 if (data->force_pcie_gen == PP_PCIEGenInvalid)
3258 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
3260 current_link_speed = data->force_pcie_gen;
3262 data->force_pcie_gen = PP_PCIEGenInvalid;
3263 data->pspp_notify_required = false;
3265 if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
		case PP_PCIEGen3:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
				break;
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
		case PP_PCIEGen2:
			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
				break;
		default:
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	}
3281 if (target_link_speed < current_link_speed)
		data->pspp_notify_required = true;

	return 0;
}
3288 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel),
				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
			DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel),
				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	return 0;
}
3322 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3323 struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
3327 (const struct phm_set_power_state_input *)input;
3328 const struct smu7_power_state *smu7_ps =
3329 cast_const_phw_smu7_power_state(states->pnew_state);
3330 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3331 uint32_t sclk = smu7_ps->performance_levels
3332 [smu7_ps->performance_level_count - 1].engine_clock;
3333 uint32_t mclk = smu7_ps->performance_levels
3334 [smu7_ps->performance_level_count - 1].memory_clock;
3335 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3337 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	uint32_t dpm_count, clock_percent;
	uint32_t i;
	if (0 == data->need_update_smu7_dpm_table)
		return 0;
3344 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3345 dpm_table->sclk_table.dpm_levels
3346 [dpm_table->sclk_table.count - 1].value = sclk;
3348 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3349 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3350 /* Need to do calculation based on the golden DPM table
			 * as the Heatmap GPU Clock axis is also based on the default values
			 */
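			/*
			 * Example: overdriving the top SCLK 10 % above the
			 * golden table scales every intermediate level above
			 * level 1 by the same 10 %, preserving the shape of
			 * the clock curve.
			 */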
3353 PP_ASSERT_WITH_CODE(
3354 (golden_dpm_table->sclk_table.dpm_levels
					[golden_dpm_table->sclk_table.count - 1].value != 0),
					"Divide by 0!",
					return -EINVAL);
3358 dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
			for (i = dpm_count; i > 1; i--) {
				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
					clock_percent =
						((sclk
						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
						) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value +
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
								clock_percent) / 100;

				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
					clock_percent =
						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
						- sclk) * 100)
						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;

					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value -
							(golden_dpm_table->sclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->sclk_table.dpm_levels[i].value =
							golden_dpm_table->sclk_table.dpm_levels[i].value;
			}
		}
	}
3390 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3391 dpm_table->mclk_table.dpm_levels
3392 [dpm_table->mclk_table.count - 1].value = mclk;
3394 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3395 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3397 PP_ASSERT_WITH_CODE(
3398 (golden_dpm_table->mclk_table.dpm_levels
						[golden_dpm_table->mclk_table.count-1].value != 0),
						"Divide by 0!",
						return -EINVAL);
3402 dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
3403 for (i = dpm_count; i > 1; i--) {
3404 if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
3405 clock_percent = ((mclk -
3406 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
3407 / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3409 dpm_table->mclk_table.dpm_levels[i].value =
3410 golden_dpm_table->mclk_table.dpm_levels[i].value +
3411 (golden_dpm_table->mclk_table.dpm_levels[i].value *
3412 clock_percent) / 100;
				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
					clock_percent = (
							(golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
							* 100)
							/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;

					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value -
							(golden_dpm_table->mclk_table.dpm_levels[i].value *
									clock_percent) / 100;
				} else
					dpm_table->mclk_table.dpm_levels[i].value =
							golden_dpm_table->mclk_table.dpm_levels[i].value;
			}
		}
	}
3431 if (data->need_update_smu7_dpm_table &
3432 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3433 result = smum_populate_all_graphic_levels(hwmgr);
3434 PP_ASSERT_WITH_CODE((0 == result),
3435 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3439 if (data->need_update_smu7_dpm_table &
3440 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3441 /*populate MCLK dpm table to SMU7 */
3442 result = smum_populate_all_memory_levels(hwmgr);
3443 PP_ASSERT_WITH_CODE((0 == result),
3444 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3451 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3452 struct smu7_single_dpm_table *dpm_table,
3453 uint32_t low_limit, uint32_t high_limit)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
3458 if ((dpm_table->dpm_levels[i].value < low_limit)
3459 || (dpm_table->dpm_levels[i].value > high_limit))
3460 dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}

	return 0;
}
3468 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3469 const struct smu7_power_state *smu7_ps)
3471 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3472 uint32_t high_limit_count;
3474 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3475 "power state did not have any performance level",
3478 high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3480 smu7_trim_single_dpm_states(hwmgr,
3481 &(data->dpm_table.sclk_table),
3482 smu7_ps->performance_levels[0].engine_clock,
3483 smu7_ps->performance_levels[high_limit_count].engine_clock);
3485 smu7_trim_single_dpm_states(hwmgr,
3486 &(data->dpm_table.mclk_table),
3487 smu7_ps->performance_levels[0].memory_clock,
			smu7_ps->performance_levels[high_limit_count].memory_clock);

	return 0;
}
3493 static int smu7_generate_dpm_level_enable_mask(
3494 struct pp_hwmgr *hwmgr, const void *input)
{
	int result;
	const struct phm_set_power_state_input *states =
3498 (const struct phm_set_power_state_input *)input;
3499 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3500 const struct smu7_power_state *smu7_ps =
3501 cast_const_phw_smu7_power_state(states->pnew_state);
3503 result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3507 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3508 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3509 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3510 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3511 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3512 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
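/*
 * The enable masks are per-level bitmasks. Assuming
 * phm_get_dpm_level_enable_mask_value() sets bit i for every dpm_levels[i]
 * with .enabled == true (consistent with its use here), a table whose
 * levels 0..2 all survived trimming yields a mask of 0x7.
 */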
static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
			(data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze SCLK DPM when DPM is disabled",
				return -EINVAL);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
				"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
			(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze MCLK DPM when DPM is disabled",
				return -EINVAL);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
				"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	data->need_update_smu7_dpm_table = 0;

	return 0;
}
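/*
 * This pairs with smu7_freeze_sclk_mclk_dpm(): the SMC is frozen before the
 * new SCLK/MCLK tables are uploaded in smu7_set_power_state_tasks() below
 * and unfrozen here once they are in place. Clearing
 * need_update_smu7_dpm_table marks the uploaded tables as current.
 */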
static int smu7_notify_link_speed_change_after_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
	uint8_t request;

	if (data->pspp_notify_required) {
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		if (request == PCIE_PERF_REQ_GEN1 &&
				smu7_get_current_pcie_speed(hwmgr) > 0)
			return 0;

		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				pr_err("PSPP request to switch to Gen2 from Gen3 Failed!\n");
			else
				pr_err("PSPP request to switch to Gen1 from Gen2 Failed!\n");
		}
	}

	return 0;
}
static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
				(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);

	return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}

	data->apply_optimized_settings = false;

	return result;
}
static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
{
	hwmgr->thermal_controller.
		advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;

	if (phm_is_hw_access_blocked(hwmgr))
		return 0;

	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;

	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
}
int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
	uint32_t num_active_displays = 0;
	struct cgs_display_info info = {0};

	info.mode_info = NULL;
	cgs_get_active_displays_info(hwmgr->device, &info);

	num_active_displays = info.display_count;

	if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
		smu7_notify_smc_display_change(hwmgr, false);

	return 0;
}
/**
 * Programs the display gap
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t num_active_displays = 0;
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock;
	uint32_t refresh_rate = 0;
	struct cgs_display_info info = {0};
	struct cgs_mode_info mode_info;

	info.mode_info = &mode_info;

	cgs_get_active_displays_info(hwmgr->device, &info);
	num_active_displays = info.display_count;

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP,
			(num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = mode_info.ref_clock;
	refresh_rate = mode_info.refresh_rate;

	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
					SMU_SoftRegisters, PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
					SMU_SoftRegisters, VBlankTimeout),
			(frame_time_in_us - pre_vbi_time_in_us));

	return 0;
}
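/*
 * Illustrative arithmetic (values assumed, not from the source): for a
 * 60 Hz mode with a 500 us vblank, frame_time_in_us = 1000000 / 60 = 16666,
 * frame_time_x2 = 16666 * 2 / 100 = 333 (units of 50 us), and
 * pre_vbi_time_in_us = 16666 - 200 - 500 = 15966. Assuming ref_clock is
 * reported in 10 kHz units, ref_clock / 100 is ticks per microsecond, so
 * display_gap2 is the pre-VBI interval expressed in reference-clock ticks.
 */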
int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}
/**
 * Set maximum target operating fan output RPM
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param us_max_fan_rpm max operating fan RPM value.
 * @return The response that came from the SMC.
 */
static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
{
	hwmgr->thermal_controller.
		advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;

	if (phm_is_hw_access_blocked(hwmgr))
		return 0;

	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
		const void *thermal_interrupt_info)
{
	return 0;
}
bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;
	struct cgs_display_info info = {0, 0, NULL};

	cgs_get_active_displays_info(hwmgr->device, &info);

	if (data->display_timing.num_existing_displays != info.display_count)
		is_update_required = true;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
				(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
				hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			is_update_required = true;
	}

	return is_update_required;
}
static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
		const struct smu7_performance_level *pl2)
{
	return ((pl1->memory_clock == pl2->memory_clock) &&
			(pl1->engine_clock == pl2->engine_clock) &&
			(pl1->pcie_gen == pl2->pcie_gen) &&
			(pl1->pcie_lane == pl2->pcie_lane));
}
int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct smu7_power_state *psa;
	const struct smu7_power_state *psb;
	int i;

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	psa = cast_const_phw_smu7_power_state(pstate1);
	psb = cast_const_phw_smu7_power_state(pstate2);
	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair that is different the states are different. */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie. */
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);

	return 0;
}
int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t vbios_version;
	uint32_t tmp;

	/* Read MC indirect register offset 0x9F bits [3:0] to see
	 * if VBIOS has already loaded a full version of MC ucode
	 * or not.
	 */

	smu7_get_mc_microcode_version(hwmgr);
	vbios_version = hwmgr->microcode_version_info.MC & 0xf;

	data->need_long_memory_training = false;

	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
			ixMC_IO_DEBUG_UP_13);
	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	if (tmp & (1 << 23)) {
		data->mem_latency_high = MEM_LATENCY_HIGH;
		data->mem_latency_low = MEM_LATENCY_LOW;
	} else {
		data->mem_latency_high = 330;
		data->mem_latency_low = 330;
	}

	return 0;
}
static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->clock_registers.vCG_SPLL_FUNC_CNTL =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	data->clock_registers.vDLL_CNTL =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2 =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1 =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2 =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);

	return 0;
}
/**
 * Find out if memory is GDDR5.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t temp;

	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);

	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
			 MC_SEQ_MISC0_GDDR5_SHIFT));

	return 0;
}
/**
 * Enables Dynamic Power Management by SMC
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, STATIC_PM_EN, 1);

	return 0;
}
/**
 * Initialize PowerGating States for different engines
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->uvd_power_gated = false;
	data->vce_power_gated = false;
	data->samu_power_gated = false;

	return 0;
}
static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_upload_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!data->sclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;
	case PP_MCLK:
		if (!data->mclk_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;
	case PP_PCIE:
	{
		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
		uint32_t level = 0;

		while (tmp >>= 1)
			level++;

		if (!data->pcie_dpm_key_disabled)
			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					level);
		break;
	}
	default:
		break;
	}

	return 0;
}
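/*
 * Example (illustrative): a mask of 0x6 keeps only DPM levels 1 and 2 in
 * the enabled mask sent to the SMC for SCLK/MCLK. For PP_PCIE the mask is
 * collapsed instead: the while loop computes the index of the highest set
 * bit, so mask 0x6 forces PCIe level 2.
 */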
static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMHz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}
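/*
 * Sample pp_dpm_sclk output (values illustrative only; the active level is
 * starred, and clocks are stored in 10 kHz units, hence the / 100):
 *   0: 300MHz
 *   1: 608MHz *
 *   2: 1077MHz
 */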
static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	if (mode) {
		/* stop auto-manage */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_MicrocodeFanControl))
			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
		smu7_fan_ctrl_set_static_mode(hwmgr, mode);
	} else
		/* restart auto-manage */
		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);

	return 0;
}
static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->fan_ctrl_is_in_default_mode)
		return hwmgr->fan_ctrl_default_mode;
	else
		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				CG_FDO_CTRL2, FDO_PWM_MODE);
}
static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}
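/*
 * Example: if the golden top SCLK is 100000 (10 kHz units) and the current
 * table's top level was overdriven to 110000, the reported OD value is
 * (110000 - 100000) * 100 / 100000 = 10, i.e. a 10% overclock.
 */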
static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}
static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}
static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}
static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	int i;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;

	for (i = 0; i < dep_sclk_table->count; i++) {
		clocks->clock[i] = dep_sclk_table->entries[i].clk;
		clocks->count++;
	}
	return 0;
}
static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_high;
	else if (clk >= MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_low;
	else
		return MEM_LATENCY_ERR;
}
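/*
 * With the MEM_FREQ_ and MEM_LATENCY_ constants defined at the top of this
 * file, clocks in [25000, 80000) (10 kHz units) report the high-latency
 * figure, clocks at or above 80000 the low-latency one, and anything
 * slower returns MEM_LATENCY_ERR (0xFFFF).
 */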
static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	int i;

	if (table_info == NULL)
		return -EINVAL;

	dep_mclk_table = table_info->vdd_dep_on_mclk;

	for (i = 0; i < dep_mclk_table->count; i++) {
		clocks->clock[i] = dep_mclk_table->entries[i].clk;
		clocks->latency[i] = smu7_get_mem_latency(hwmgr,
				dep_mclk_table->entries[i].clk);
		clocks->count++;
	}
	return 0;
}
static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
		struct amd_pp_clocks *clocks)
{
	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = smu7_hwmgr_backend_init,
	.backend_fini = phm_hwmgr_backend_fini,
	.asic_setup = smu7_setup_asic_task,
	.dynamic_state_management_enable = smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.get_temperature = smu7_thermal_get_temperature,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min),
			"Engine clock can't satisfy stutter requirement!",
			return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}
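/*
 * Worked example (illustrative, assuming SMU7_MINIMUM_ENGINE_CLOCK == 2500
 * and SMU7_MAX_DEEPSLEEP_DIVIDER_ID == 5, both in their usual 10 kHz
 * units): with clock = 30000 and clock_insr below the floor, min = 2500 and
 * the loop tries 30000 >> 5 = 937, then 30000 >> 4 = 1875, then
 * 30000 >> 3 = 3750 >= min, so the function returns divider ID 3.
 */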
int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	pp_smu7_thermal_initialize(hwmgr);
	return ret;
}