Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm...
author    Dave Airlie <airlied@redhat.com>
          Wed, 28 Sep 2016 01:09:59 +0000 (11:09 +1000)
committer Dave Airlie <airlied@redhat.com>
          Wed, 28 Sep 2016 01:09:59 +0000 (11:09 +1000)
A bit smaller pull request this time around.  Some continued DT binding
cleanup to get the corresponding dts bits merged upstream (through
other trees), and explicit fence-fd support for the submit ioctl (a
userspace sketch of the new fencing flow follows the shortlog below).

* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
  drm/msm: bump kernel api version for explicit fencing
  drm/msm: submit support for out-fences
  drm/msm: move fence allocation out of msm_gpu_submit()
  drm/msm: submit support for in-fences
  drm/msm: extend the submit ioctl to pass in flags
  drm/msm/mdp5: Set rotation property initial value to DRM_ROTATE_0 instead of 0
  drm/msm/hdmi: don't print error when adding i2c adapter fails
  drm/msm/mdp4: mark symbols static where possible
  drm/msm: Remove call to reservation_object_test_signaled_rcu before wait
  drm/msm/hdmi: Clean up HDMI gpio DT bindings
  drm/msm/mdp4: Fix issue with LCDC/LVDS port parsing
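
For context, the explicit-fencing flow these commits add looks roughly
like the sketch below from the userspace side. It assumes the uapi lands
as in this series (MSM_SUBMIT_FENCE_FD_IN/OUT flags and a fence_fd field
in struct drm_msm_gem_submit); error handling is elided.

/*
 * Hedged sketch: submit a cmdstream that waits on an input fence fd
 * and hands back an output fence fd (a sync_file) for the work.
 */
#include <sys/ioctl.h>
#include <xf86drm.h>      /* drmIoctl() */
#include <msm_drm.h>      /* uapi header extended by this series */

int submit_with_fences(int drm_fd, struct drm_msm_gem_submit *req,
                       int in_fence_fd, int *out_fence_fd)
{
        if (in_fence_fd >= 0) {
                /* GPU waits for in_fence_fd before running the cmds */
                req->flags |= MSM_SUBMIT_FENCE_FD_IN;
                req->fence_fd = in_fence_fd;
        }
        /* ask for a sync_file fd that signals on completion */
        req->flags |= MSM_SUBMIT_FENCE_FD_OUT;

        if (drmIoctl(drm_fd, DRM_IOCTL_MSM_GEM_SUBMIT, req))
                return -1;

        *out_fence_fd = req->fence_fd;  /* poll() it, or pass it on */
        return 0;
}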

297 files changed:
Documentation/gpu/drm-kms-helpers.rst
Documentation/gpu/drm-kms.rst
Documentation/gpu/kms-properties.csv
arch/x86/kernel/early-quirks.c
drivers/dma-buf/sync_file.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/Kconfig
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/cz_smc.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_v6_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
drivers/gpu/drm/amd/amdgpu/r600_dpm.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dma.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dma.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dpm.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_dpm.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_ih.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_ih.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/si_smc.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/sislands_smc.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdgpu/vid.h
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/si/sid.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/atombios.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c [deleted file]
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h [deleted file]
drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/power_state.h
drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/arc/arcpgu_drv.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/adv7511/adv7533.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
drivers/gpu/drm/bridge/dw-hdmi.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_blend.c
drivers/gpu/drm/drm_bridge.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_color_mgmt.c [new file with mode: 0644]
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper_internal.h
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_encoder.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_global.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_mode_object.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_plane.c [new file with mode: 0644]
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vma_manager.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_tcon.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sw_fence.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_sw_fence.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dpll_mgr.h
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/qxl/qxl_draw.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/virtio/virtgpu_drm_bus.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/ipu-v3/Makefile
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-dmfc.c
drivers/gpu/ipu-v3/ipu-ic.c
drivers/gpu/ipu-v3/ipu-image-convert.c [new file with mode: 0644]
drivers/gpu/ipu-v3/ipu-prv.h
drivers/gpu/ipu-v3/ipu-vdi.c [new file with mode: 0644]
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_atomic_helper.h
include/drm/drm_blend.h [new file with mode: 0644]
include/drm/drm_bridge.h [new file with mode: 0644]
include/drm/drm_color_mgmt.h [new file with mode: 0644]
include/drm/drm_connector.h
include/drm/drm_core.h [deleted file]
include/drm/drm_crtc.h
include/drm/drm_dp_helper.h
include/drm/drm_edid.h
include/drm/drm_encoder.h
include/drm/drm_fb_helper.h
include/drm/drm_framebuffer.h
include/drm/drm_mode_object.h
include/drm/drm_modes.h
include/drm/drm_plane.h [new file with mode: 0644]
include/drm/drm_property.h
include/drm/drm_vma_manager.h
include/drm/i915_pciids.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_placement.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
include/uapi/linux/sync_file.h
include/video/imx-ipu-image-convert.h [new file with mode: 0644]
include/video/imx-ipu-v3.h

diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 59fa3c1..bb4254d 100644
@@ -126,6 +126,13 @@ Default bridge callback sequence
 .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    :doc: bridge callbacks
 
+
+Bridge Helper Reference
+-------------------------
+
+.. kernel-doc:: include/drm/drm_bridge.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    :export:
 
@@ -201,6 +208,9 @@ Output Probing Helper Functions Reference
 EDID Helper Functions Reference
 ===============================
 
+.. kernel-doc:: include/drm/drm_edid.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/drm_edid.c
    :export:
 
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index f9a991b..53b872c 100644
@@ -110,6 +110,21 @@ Note that dumb objects may not be used for gpu acceleration, as has been
 attempted on some ARM embedded platforms. Such drivers really must have
 a hardware-specific ioctl to allocate suitable buffer objects.
 
+Plane Abstraction
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :doc: overview
+
+Plane Functions Reference
+-------------------------
+
+.. kernel-doc:: include/drm/drm_plane.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :export:
+
 Display Modes Function Reference
 ================================
 
@@ -177,50 +192,6 @@ allocated and zeroed by the driver, possibly as part of a larger
 structure, and registered with a call to :c:func:`drm_crtc_init()`
 with a pointer to CRTC functions.
 
-Planes (:c:type:`struct drm_plane <drm_plane>`)
------------------------------------------------
-
-A plane represents an image source that can be blended with or overlayed
-on top of a CRTC during the scanout process. Planes are associated with
-a frame buffer to crop a portion of the image memory (source) and
-optionally scale it to a destination size. The result is then blended
-with or overlayed on top of a CRTC.
-
-The DRM core recognizes three types of planes:
-
--  DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.
-   Primary planes are the planes operated upon by CRTC modesetting and
-   flipping operations described in the page_flip hook in
-   :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`.
--  DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC.
-   Cursor planes are the planes operated upon by the
-   DRM_IOCTL_MODE_CURSOR and DRM_IOCTL_MODE_CURSOR2 ioctls.
--  DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor
-   planes. Some drivers refer to these types of planes as "sprites"
-   internally.
-
-For compatibility with legacy userspace, only overlay planes are made
-available to userspace by default. Userspace clients may set the
-DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate
-that they wish to receive a universal plane list containing all plane
-types.
-
-Plane Initialization
-~~~~~~~~~~~~~~~~~~~~
-
-To create a plane, a KMS drivers allocates and zeroes an instances of
-:c:type:`struct drm_plane <drm_plane>` (possibly as part of a
-larger structure) and registers it with a call to
-:c:func:`drm_universal_plane_init()`. The function takes a
-bitmask of the CRTCs that can be associated with the plane, a pointer to
-the plane functions, a list of format supported formats, and the type of
-plane (primary, cursor, or overlay) being initialized.
-
-Cursor and overlay planes are optional. All drivers should provide one
-primary plane per CRTC (although this requirement may change in the
-future); drivers that do not wish to provide special handling for
-primary planes may make use of the helper functions described in ? to
-create and register a primary plane with standard capabilities.
 
 Cleanup
 -------
@@ -316,12 +287,27 @@ Property Types and Blob Property Support
 .. kernel-doc:: drivers/gpu/drm/drm_property.c
    :export:
 
-Blending and Z-Position properties
-----------------------------------
+Plane Composition Properties
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_blend.c
+   :doc: overview
 
 .. kernel-doc:: drivers/gpu/drm/drm_blend.c
    :export:
 
+Color Management Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_color_mgmt.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
+   :export:
+
 Existing KMS Properties
 -----------------------
 
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
index 4c5ce3e..981873a 100644
@@ -1,23 +1,10 @@
 Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,Description/Restrictions
-DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, { 2, ""rotate-180"" }, { 3, ""rotate-270"" }, { 4, ""reflect-x"" }, { 5, ""reflect-y"" }","CRTC, Plane",rotate-(degrees) rotates the image by the specified amount in degrees in counter clockwise direction. reflect-x and reflect-y reflects the image along the specified axis prior to rotation
 ,,“scaling mode”,ENUM,"{ ""None"", ""Full"", ""Center"", ""Full aspect"" }",Connector,"Supported by: amdgpu, gma500, i915, nouveau and radeon."
 ,Connector,“EDID”,BLOB | IMMUTABLE,0,Connector,Contains id of edid blob ptr object.
 ,,“DPMS”,ENUM,"{ “On”, “Standby”, “Suspend”, “Off” }",Connector,Contains DPMS operation mode value.
 ,,“PATH”,BLOB | IMMUTABLE,0,Connector,Contains topology path to a connector.
 ,,“TILE”,BLOB | IMMUTABLE,0,Connector,Contains tiling information for a connector.
 ,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Connector,CRTC that connector is attached to (atomic)
-,Plane,“type”,ENUM | IMMUTABLE,"{ ""Overlay"", ""Primary"", ""Cursor"" }",Plane,Plane type
-,,“SRC_X”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source x coordinate in 16.16 fixed point (atomic)
-,,“SRC_Y”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source y coordinate in 16.16 fixed point (atomic)
-,,“SRC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source width in 16.16 fixed point (atomic)
-,,“SRC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source height in 16.16 fixed point (atomic)
-,,“CRTC_X”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) x coordinate (atomic)
-,,“CRTC_Y”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) y coordinate (atomic)
-,,“CRTC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) width (atomic)
-,,“CRTC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) height (atomic)
-,,“FB_ID”,OBJECT,DRM_MODE_OBJECT_FB,Plane,Scanout framebuffer (atomic)
-,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Plane,CRTC that plane is attached to (atomic)
-,,“zpos”,RANGE,"Min=0, Max=UINT_MAX","Plane,Z-order of the plane.Planes with higher Z-order values are displayed on top, planes with identical Z-order values are display in an undefined order"
 ,DVI-I,“subconnector”,ENUM,"{ “Unknown”, “DVI-D”, “DVI-A” }",Connector,TBD
 ,,“select subconnector”,ENUM,"{ “Automatic”, “DVI-D”, “DVI-A” }",Connector,TBD
 ,TV,“subconnector”,ENUM,"{ ""Unknown"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
@@ -36,12 +23,6 @@ DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, {
 ,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
 ,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
 ,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
-,,“dirty”,ENUM | IMMUTABLE,"{ ""Off"", ""On"", ""Annotate"" }",Connector,TBD
-,,“DEGAMMA_LUT”,BLOB,0,CRTC,DRM property to set the degamma lookup table (LUT) mapping pixel data from the framebuffer before it is given to the transformation matrix. The data is an interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
-,,“DEGAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property to gives the size of the lookup table to be set on the DEGAMMA_LUT property (the size depends on the underlying hardware).
-,,“CTM”,BLOB,0,CRTC,DRM property to set the current transformation matrix (CTM) apply to pixel data after the lookup through the degamma LUT and before the lookup through the gamma LUT. The data is an interpreted as a struct drm_color_ctm.
-,,“GAMMA_LUT”,BLOB,0,CRTC,DRM property to set the gamma lookup table (LUT) mapping pixel data after to the transformation matrix to data sent to the connector. The data is an interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
-,,“GAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property to gives the size of the lookup table to be set on the GAMMA_LUT property (the size depends on the underlying hardware).
 i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
 ,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
 ,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
@@ -95,7 +76,6 @@ armada,CRTC,"""CSC_YUV""",ENUM,"{ ""Auto"" , ""CCIR601"", ""CCIR709"" }",CRTC,TB
 ,,"""contrast""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
 ,,"""saturation""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
 exynos,CRTC,“mode”,ENUM,"{ ""normal"", ""blank"" }",CRTC,TBD
-,Overlay,“zpos”,RANGE,"Min=0, Max=MAX_PLANE-1",Plane,TBD
 i2c/ch7006_drv,Generic,“scale”,RANGE,"Min=0, Max=2",Connector,TBD
 ,TV,“mode”,ENUM,"{ ""PAL"", ""PAL-M"",""PAL-N""}, ”PAL-Nc"" , ""PAL-60"", ""NTSC-M"", ""NTSC-J"" }",Connector,TBD
 nouveau,NV10 Overlay,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
@@ -126,4 +106,3 @@ radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
 ,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
 rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
 ,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
-,,"""zpos""",RANGE,"Min=1, Max=7",Plane,TBD
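
The rows removed above are the generic plane, rotation, zpos and color
management properties, which now live in the generated kernel-doc
(drm_plane.c, drm_blend.c and drm_color_mgmt.c in this series) rather
than in this hand-maintained table. For reference, userspace sets any of
these through the ordinary property interface; a sketch using standard
libdrm calls (the name-lookup loop is illustrative, not part of this
patch):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int set_plane_prop(int fd, uint32_t plane_id, const char *name,
                   uint64_t value)
{
        drmModeObjectProperties *props =
                drmModeObjectGetProperties(fd, plane_id,
                                           DRM_MODE_OBJECT_PLANE);
        int ret = -1;

        for (uint32_t i = 0; props && i < props->count_props; i++) {
                drmModePropertyRes *prop =
                        drmModeGetProperty(fd, props->props[i]);
                /* match the property by name, e.g. "rotation" or "zpos" */
                if (prop && !strcmp(prop->name, name))
                        ret = drmModeObjectSetProperty(fd, plane_id,
                                        DRM_MODE_OBJECT_PLANE,
                                        prop->prop_id, value);
                drmModeFreeProperty(prop);
        }
        drmModeFreeObjectProperties(props);
        return ret;
}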
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 8b8852b..f306698 100644
@@ -507,8 +507,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
        INTEL_I915GM_IDS(&gen3_early_ops),
        INTEL_I945G_IDS(&gen3_early_ops),
        INTEL_I945GM_IDS(&gen3_early_ops),
-       INTEL_VLV_M_IDS(&gen6_early_ops),
-       INTEL_VLV_D_IDS(&gen6_early_ops),
+       INTEL_VLV_IDS(&gen6_early_ops),
        INTEL_PINEVIEW_IDS(&gen3_early_ops),
        INTEL_I965G_IDS(&gen3_early_ops),
        INTEL_G33_IDS(&gen3_early_ops),
@@ -521,10 +520,8 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
        INTEL_SNB_M_IDS(&gen6_early_ops),
        INTEL_IVB_M_IDS(&gen6_early_ops),
        INTEL_IVB_D_IDS(&gen6_early_ops),
-       INTEL_HSW_D_IDS(&gen6_early_ops),
-       INTEL_HSW_M_IDS(&gen6_early_ops),
-       INTEL_BDW_M_IDS(&gen8_early_ops),
-       INTEL_BDW_D_IDS(&gen8_early_ops),
+       INTEL_HSW_IDS(&gen6_early_ops),
+       INTEL_BDW_IDS(&gen8_early_ops),
        INTEL_CHV_IDS(&chv_early_ops),
        INTEL_SKL_IDS(&gen9_early_ops),
        INTEL_BXT_IDS(&gen9_early_ops),
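
The hunks above swap the separate mobile/desktop Intel ID list macros for
consolidated ones from include/drm/i915_pciids.h (also updated in this
pull, per the file list). For illustration, a consolidated list macro has
roughly this shape; the device IDs shown are an example of the pattern,
not an authoritative list:

#define INTEL_VGA_DEVICE(id, info) {            \
        0x8086, id,                             \
        ~0, ~0,                                 \
        0x030000, 0xff0000,                     \
        (unsigned long) info }

#define INTEL_VLV_IDS(info) \
        INTEL_VGA_DEVICE(0x0f30, info), \
        INTEL_VGA_DEVICE(0x0f31, info), \
        INTEL_VGA_DEVICE(0x0157, info), \
        INTEL_VGA_DEVICE(0x0155, info)

Each entry expands to a struct pci_device_id initializer, so one table
row like INTEL_VLV_IDS(&gen6_early_ops) now covers both the mobile and
desktop parts that previously needed two rows.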
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 486d29c..b29a9e8 100644
@@ -150,6 +150,7 @@ static int sync_file_set_fence(struct sync_file *sync_file,
         */
        if (num_fences == 1) {
                sync_file->fence = fences[0];
+               kfree(fences);
        } else {
                array = fence_array_create(num_fences, fences,
                                           fence_context_alloc(1), 1, false);
@@ -253,10 +254,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
        for (; i_b < b_num_fences; i_b++)
                add_fence(fences, &i, b_fences[i_b]);
 
-       if (i == 0) {
-               add_fence(fences, &i, a_fences[0]);
-               i++;
-       }
+       if (i == 0)
+               fences[i++] = fence_get(a_fences[0]);
 
        if (num_fences > i) {
                nfences = krealloc(fences, i * sizeof(*fences),
@@ -306,7 +305,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &sync_file->wq, wait);
 
-       if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+       if (!poll_does_not_wait(wait) &&
+           !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
                if (fence_add_callback(sync_file->fence, &sync_file->cb,
                                       fence_check_cb_func) < 0)
                        wake_up_all(&sync_file->wq);
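
The three fixes above (freeing the temporary fences array in the
single-fence case, taking a proper reference when every merged fence has
already signaled, and skipping callback setup when poll() isn't going to
wait) all sit behind the same small uapi. A minimal userspace sketch of
that path, using the standard sync_file ioctls; error handling trimmed:

#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

int merge_and_wait(int fence_a, int fence_b, int timeout_ms)
{
        struct sync_merge_data merge = { .fd2 = fence_b };
        struct pollfd pfd;

        strcpy(merge.name, "merged");
        if (ioctl(fence_a, SYNC_IOC_MERGE, &merge))
                return -1;

        /* the merged fd reports POLLIN once both inputs signal */
        pfd.fd = merge.fence;
        pfd.events = POLLIN;
        return poll(&pfd, 1, timeout_ms);
}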
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 439d89b..25c7204 100644
@@ -14,7 +14,8 @@ drm-y       :=        drm_auth.o drm_bufs.o drm_cache.o \
                drm_rect.o drm_vma_manager.o drm_flip_work.o \
                drm_modeset_lock.o drm_atomic.o drm_bridge.o \
                drm_framebuffer.o drm_connector.o drm_blend.o \
-               drm_encoder.o drm_mode_object.o drm_property.o
+               drm_encoder.o drm_mode_object.o drm_property.o \
+               drm_plane.o drm_color_mgmt.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index f3cb69d..53cf397 100644
@@ -1,3 +1,10 @@
+config DRM_AMDGPU_SI
+       bool "Enable amdgpu support for SI parts"
+       depends on DRM_AMDGPU
+       help
+         Choose this option if you want to enable experimental support
+         for SI asics.
+
 config DRM_AMDGPU_CIK
        bool "Enable amdgpu support for CIK parts"
        depends on DRM_AMDGPU
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 21dd7c0..dc6df07 100644
@@ -30,6 +30,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
        ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
        amdgpu_amdkfd_gfx_v7.o
 
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+
 amdgpu-y += \
        vi.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3cc2629..ee45d9f 100644
@@ -64,6 +64,7 @@
 extern int amdgpu_modeset;
 extern int amdgpu_vram_limit;
 extern int amdgpu_gart_size;
+extern int amdgpu_moverate;
 extern int amdgpu_benchmarking;
 extern int amdgpu_testing;
 extern int amdgpu_audio;
@@ -94,6 +95,7 @@ extern unsigned amdgpu_pg_mask;
 extern char *amdgpu_disable_cu;
 extern int amdgpu_sclk_deep_sleep_en;
 extern char *amdgpu_virtual_display;
+extern unsigned amdgpu_pp_feature_mask;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
@@ -108,7 +110,7 @@ extern char *amdgpu_virtual_display;
 #define AMDGPU_MAX_RINGS                       16
 #define AMDGPU_MAX_GFX_RINGS                   1
 #define AMDGPU_MAX_COMPUTE_RINGS               8
-#define AMDGPU_MAX_VCE_RINGS                   2
+#define AMDGPU_MAX_VCE_RINGS                   3
 
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES              2
@@ -318,6 +320,10 @@ struct amdgpu_ring_funcs {
        /* note usage for clock and power gating */
        void (*begin_use)(struct amdgpu_ring *ring);
        void (*end_use)(struct amdgpu_ring *ring);
+       void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+       void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+       unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
+       unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
 };
 
 /*
@@ -618,6 +624,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
 int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
                     int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 /*
  * GPU MC structures, functions & helpers
@@ -963,6 +970,7 @@ struct amdgpu_ctx {
        spinlock_t              ring_lock;
        struct fence            **fences;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
+       bool preamble_presented;
 };
 
 struct amdgpu_ctx_mgr {
@@ -1222,11 +1230,16 @@ struct amdgpu_cs_parser {
        struct fence                    *fence;
        uint64_t                        bytes_moved_threshold;
        uint64_t                        bytes_moved;
+       struct amdgpu_bo_list_entry     *evictable;
 
        /* user fence */
        struct amdgpu_bo_list_entry     uf_entry;
 };
 
+#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
+
 struct amdgpu_job {
        struct amd_sched_job    base;
        struct amdgpu_device    *adev;
@@ -1235,9 +1248,10 @@ struct amdgpu_job {
        struct amdgpu_sync      sync;
        struct amdgpu_ib        *ibs;
        struct fence            *fence; /* the hw fence */
+       uint32_t                preamble_status;
        uint32_t                num_ibs;
        void                    *owner;
-       uint64_t                ctx;
+       uint64_t                fence_ctx; /* the fence_context this job uses */
        bool                    vm_needs_flush;
        unsigned                vm_id;
        uint64_t                vm_pd_addr;
@@ -1686,6 +1700,7 @@ struct amdgpu_vce {
        unsigned                harvest_config;
        struct amd_sched_entity entity;
        uint32_t                srbm_soft_reset;
+       unsigned                num_rings;
 };
 
 /*
@@ -1703,6 +1718,10 @@ struct amdgpu_sdma_instance {
 
 struct amdgpu_sdma {
        struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+#ifdef CONFIG_DRM_AMDGPU_SI
+       //SI DMA has a difference trap irq number for the second engine
+       struct amdgpu_irq_src   trap_irq_1;
+#endif
        struct amdgpu_irq_src   trap_irq;
        struct amdgpu_irq_src   illegal_inst_irq;
        int                     num_instances;
@@ -1819,6 +1838,9 @@ struct amdgpu_asic_funcs {
        int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
        /* query virtual capabilities */
        u32 (*get_virtual_caps)(struct amdgpu_device *adev);
+       /* static power management */
+       int (*get_pcie_lanes)(struct amdgpu_device *adev);
+       void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 };
 
 /*
@@ -1993,6 +2015,8 @@ struct amdgpu_device {
        spinlock_t pcie_idx_lock;
        amdgpu_rreg_t                   pcie_rreg;
        amdgpu_wreg_t                   pcie_wreg;
+       amdgpu_rreg_t                   pciep_rreg;
+       amdgpu_wreg_t                   pciep_wreg;
        /* protects concurrent UVD register access */
        spinlock_t uvd_ctx_idx_lock;
        amdgpu_rreg_t                   uvd_ctx_rreg;
@@ -2033,6 +2057,14 @@ struct amdgpu_device {
        atomic64_t                      num_evictions;
        atomic_t                        gpu_reset_counter;
 
+       /* data for buffer migration throttling */
+       struct {
+               spinlock_t              lock;
+               s64                     last_update_us;
+               s64                     accum_us; /* accumulated microseconds */
+               u32                     log2_max_MBps;
+       } mm_stats;
+
        /* display */
        bool                            enable_virtual_display;
        struct amdgpu_mode_info         mode_info;
@@ -2101,6 +2133,10 @@ struct amdgpu_device {
        /* link all shadow bo */
        struct list_head                shadow_list;
        struct mutex                    shadow_list_lock;
+       /* link all gtt */
+       spinlock_t                      gtt_list_lock;
+       struct list_head                gtt_list;
+
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2133,6 +2169,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -2223,6 +2261,9 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
 #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
+#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
+#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
+#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2244,9 +2285,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
+#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2402,6 +2447,8 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
 int amdgpu_ttm_global_init(struct amdgpu_device *adev);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size);
@@ -2434,8 +2481,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv);
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv);
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2481,6 +2528,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
 struct amdgpu_bo_va_mapping *
 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                       uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
 
 #include "amdgpu_object.h"
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 362bedc..1a0a5f7 100644
@@ -103,11 +103,11 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id);
 static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout);
+                               unsigned int utimeout);
 static int kgd_address_watch_disable(struct kgd_dev *kgd);
 static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
@@ -437,11 +437,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 }
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
+       int timeout = utimeout;
 
        acquire_queue(kgd, pipe_id, queue_id);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -452,9 +453,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
                        break;
-               if (timeout == 0) {
-                       pr_err("kfd: cp queue preemption time out (%dms)\n",
-                               temp);
+               if (timeout <= 0) {
+                       pr_err("kfd: cp queue preemption time out.\n");
                        release_queue(kgd);
                        return -ETIME;
                }
@@ -467,12 +467,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
 }
 
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout)
+                               unsigned int utimeout)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t temp;
+       int timeout = utimeout;
 
        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
@@ -485,7 +486,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
                        break;
-               if (timeout == 0)
+               if (timeout <= 0)
                        return -ETIME;
                msleep(20);
                timeout -= 20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 04b744d..6697612 100644
@@ -62,10 +62,10 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                uint32_t pipe_id, uint32_t queue_id);
 static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id);
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout);
+                               unsigned int utimeout);
 static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
 static int kgd_address_watch_disable(struct kgd_dev *kgd);
 static int kgd_address_watch_execute(struct kgd_dev *kgd,
@@ -349,11 +349,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 }
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-                               unsigned int timeout, uint32_t pipe_id,
+                               unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
+       int timeout = utimeout;
 
        acquire_queue(kgd, pipe_id, queue_id);
 
@@ -363,9 +364,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
                        break;
-               if (timeout == 0) {
-                       pr_err("kfd: cp queue preemption time out (%dms)\n",
-                               temp);
+               if (timeout <= 0) {
+                       pr_err("kfd: cp queue preemption time out.\n");
                        release_queue(kgd);
                        return -ETIME;
                }
@@ -378,12 +378,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
 }
 
 static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-                               unsigned int timeout)
+                               unsigned int utimeout)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t temp;
+       int timeout = utimeout;
 
        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
@@ -396,7 +397,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
                        break;
-               if (timeout == 0)
+               if (timeout <= 0)
                        return -ETIME;
                msleep(20);
                timeout -= 20;
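
Both files get the same fix: the millisecond budget is copied into a
signed local and the exit test becomes "timeout <= 0", because the loop
decrements in 20ms steps and an unsigned counter that steps past zero
wraps around instead of terminating. A standalone illustration of the
bug (not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned int t = 50;    /* not a multiple of the 20ms step */

        t -= 20;                /* 30 */
        t -= 20;                /* 10 */
        t -= 20;                /* wraps to 4294967286; "t == 0" never fires */
        printf("unsigned: %u\n", t);

        int s = 50;
        s -= 20; s -= 20; s -= 20;      /* -10; "s <= 0" catches it */
        printf("signed:   %d\n", s);
        return 0;
}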
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1b62116..59961db 100644
@@ -978,6 +978,48 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                return -EINVAL;
 
        switch (crev) {
+       case 2:
+       case 3:
+       case 5:
+               /* r6xx, r7xx, evergreen, ni, si.
+                * TODO: add support for asic_type <= CHIP_RV770*/
+               if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+                       args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                       dividers->post_div = args.v3.ucPostDiv;
+                       dividers->enable_post_div = (args.v3.ucCntlFlag &
+                                                    ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                       dividers->enable_dithen = (args.v3.ucCntlFlag &
+                                                  ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                       dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+                       dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+                       dividers->ref_div = args.v3.ucRefDiv;
+                       dividers->vco_mode = (args.v3.ucCntlFlag &
+                                             ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+               } else {
+                       /* for SI we use ComputeMemoryClockParam for memory plls */
+                       if (adev->asic_type >= CHIP_TAHITI)
+                               return -EINVAL;
+                       args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+                       if (strobe_mode)
+                               args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+                       dividers->post_div = args.v5.ucPostDiv;
+                       dividers->enable_post_div = (args.v5.ucCntlFlag &
+                                                    ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+                       dividers->enable_dithen = (args.v5.ucCntlFlag &
+                                                  ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+                       dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+                       dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+                       dividers->ref_div = args.v5.ucRefDiv;
+                       dividers->vco_mode = (args.v5.ucCntlFlag &
+                                             ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+               }
+               break;
        case 4:
                /* fusion */
                args.v4.ulClock = cpu_to_le32(clock);   /* 10 khz */
@@ -1122,6 +1164,32 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
        amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+                                         u16 *vddc, u16 *vddci, u16 *mvdd)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+       u8 frev, crev;
+       u16 data_offset;
+       union firmware_info *firmware_info;
+
+       *vddc = 0;
+       *vddci = 0;
+       *mvdd = 0;
+
+       if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset)) {
+               firmware_info =
+                       (union firmware_info *)(mode_info->atom_context->bios +
+                                               data_offset);
+               *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+               if ((frev == 2) && (crev >= 2)) {
+                       *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+                       *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+               }
+       }
+}
+
 union set_voltage {
        struct _SET_VOLTAGE_PS_ALLOCATION alloc;
        struct _SET_VOLTAGE_PARAMETERS v1;
@@ -1129,6 +1197,52 @@ union set_voltage {
        struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
 };
 
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+                            u16 voltage_id, u16 *voltage)
+{
+       union set_voltage args;
+       int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+       u8 frev, crev;
+
+       if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+               return -EINVAL;
+
+       switch (crev) {
+       case 1:
+               return -EINVAL;
+       case 2:
+               args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+               args.v2.ucVoltageMode = 0;
+               args.v2.usVoltageLevel = 0;
+
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+               break;
+       case 3:
+               args.v3.ucVoltageType = voltage_type;
+               args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+               args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+               *voltage = le16_to_cpu(args.v3.usVoltageLevel);
+               break;
+       default:
+               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+                                                     u16 *voltage,
+                                                     u16 leakage_idx)
+{
+       return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
 void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
                                 u16 voltage_level,
                                 u8 voltage_type)
@@ -1349,6 +1463,50 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL
        return NULL;
 }
 
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+                             u8 voltage_type,
+                             u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+       int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+       u8 frev, crev;
+       u16 data_offset, size;
+       union voltage_object_info *voltage_info;
+       union voltage_object *voltage_object = NULL;
+
+       if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+                                  &frev, &crev, &data_offset)) {
+               voltage_info = (union voltage_object_info *)
+                       (adev->mode_info.atom_context->bios + data_offset);
+
+               switch (frev) {
+               case 3:
+                       switch (crev) {
+                       case 1:
+                               voltage_object = (union voltage_object *)
+                                       amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+                                                                     voltage_type,
+                                                                     VOLTAGE_OBJ_SVID2);
+                               if (voltage_object) {
+                                       *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+                                       *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+                               } else {
+                                       return -EINVAL;
+                               }
+                               break;
+                       default:
+                               DRM_ERROR("unknown voltage object table\n");
+                               return -EINVAL;
+                       }
+                       break;
+               default:
+                       DRM_ERROR("unknown voltage object table\n");
+                       return -EINVAL;
+               }
+
+       }
+       return 0;
+}
+
 bool
 amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
                                u8 voltage_type, u8 voltage_mode)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 15dd43e..1735615 100644
@@ -208,5 +208,19 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+                            u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+                                                     u16 *voltage,
+                                                     u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+                                         u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+                                      u8 clock_type,
+                                      u32 clock,
+                                      bool strobe_mode,
+                                      struct atom_clock_dividers *dividers);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+                             u8 voltage_type,
+                             u8 *svd_gpio_id, u8 *svc_gpio_id);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index bc0440f..f1c53a2 100644
@@ -616,7 +616,7 @@ static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, un
        return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
 }
 
-int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_clockgating_state state)
 {
@@ -637,7 +637,7 @@ int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
        return r;
 }
 
-int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state)
 {
@@ -848,6 +848,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
        case CGS_SYSTEM_INFO_GFX_SE_INFO:
                sys_info->value = adev->gfx.config.max_shader_engines;
                break;
+       case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
+               sys_info->value = adev->pdev->subsystem_device;
+               break;
+       case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
+               sys_info->value = adev->pdev->subsystem_vendor;
+               break;
        default:
                return -ENODEV;
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d80e5d3..b8412bc 100644
@@ -91,6 +91,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      uint32_t *offset)
 {
        struct drm_gem_object *gobj;
+       unsigned long size;
 
        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
@@ -101,6 +102,11 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
+
+       size = amdgpu_bo_size(p->uf_entry.robj);
+       if (size != PAGE_SIZE || (data->offset + 8) > size)
+               return -EINVAL;
+
        *offset = data->offset;
 
        drm_gem_object_unreference_unlocked(gobj);
@@ -235,56 +241,115 @@ free_chunk:
        return ret;
 }
 
-/* Returns how many bytes TTM can move per IB.
+/* Convert microseconds to bytes. */
+static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
+{
+       if (us <= 0 || !adev->mm_stats.log2_max_MBps)
+               return 0;
+
+       /* Since accum_us is incremented by a million per second, just
+        * multiply it by the number of MB/s to get the number of bytes.
+        */
+       return us << adev->mm_stats.log2_max_MBps;
+}
+
+static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
+{
+       if (!adev->mm_stats.log2_max_MBps)
+               return 0;
+
+       return bytes >> adev->mm_stats.log2_max_MBps;
+}
+
+/* Returns how many bytes TTM can move right now. If no bytes can be moved,
+ * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
+ * which means it can go over the threshold once. If that happens, the driver
+ * will be in debt and no other buffer migrations can be done until that debt
+ * is repaid.
+ *
+ * This approach allows moving a buffer of any size (it's important to allow
+ * that).
+ *
+ * The currency is simply time in microseconds and it increases as the clock
+ * ticks. The accumulated microseconds (us) are converted to bytes and
+ * returned.
  */
 static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 {
-       u64 real_vram_size = adev->mc.real_vram_size;
-       u64 vram_usage = atomic64_read(&adev->vram_usage);
+       s64 time_us, increment_us;
+       u64 max_bytes;
+       u64 free_vram, total_vram, used_vram;
 
-       /* This function is based on the current VRAM usage.
+       /* Allow a maximum of 200 accumulated ms. This is basically per-IB
+        * throttling.
         *
-        * - If all of VRAM is free, allow relocating the number of bytes that
-        *   is equal to 1/4 of the size of VRAM for this IB.
+        * It means that in order to sustain the full max MBps, at least
+        * 5 IBs per second must be submitted, no more than 200ms apart
+        * from each other.
+        */
+       const s64 us_upper_bound = 200000;
 
-        * - If more than one half of VRAM is occupied, only allow relocating
-        *   1 MB of data for this IB.
-        *
-        * - From 0 to one half of used VRAM, the threshold decreases
-        *   linearly.
-        *         __________________
-        * 1/4 of -|\               |
-        * VRAM    | \              |
-        *         |  \             |
-        *         |   \            |
-        *         |    \           |
-        *         |     \          |
-        *         |      \         |
-        *         |       \________|1 MB
-        *         |----------------|
-        *    VRAM 0 %             100 %
-        *         used            used
-        *
-        * Note: It's a threshold, not a limit. The threshold must be crossed
-        * for buffer relocations to stop, so any buffer of an arbitrary size
-        * can be moved as long as the threshold isn't crossed before
-        * the relocation takes place. We don't want to disable buffer
-        * relocations completely.
+       if (!adev->mm_stats.log2_max_MBps)
+               return 0;
+
+       total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+       used_vram = atomic64_read(&adev->vram_usage);
+       free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+       spin_lock(&adev->mm_stats.lock);
+
+       /* Increase the amount of accumulated us. */
+       time_us = ktime_to_us(ktime_get());
+       increment_us = time_us - adev->mm_stats.last_update_us;
+       adev->mm_stats.last_update_us = time_us;
+       adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
+                                      us_upper_bound);
+
+       /* This prevents the short period of low performance when the VRAM
+        * usage is low and the driver is in debt or doesn't have enough
+        * accumulated us to fill VRAM quickly.
         *
-        * The idea is that buffers should be placed in VRAM at creation time
-        * and TTM should only do a minimum number of relocations during
-        * command submission. In practice, you need to submit at least
-        * a dozen IBs to move all buffers to VRAM if they are in GTT.
+        * The situation can occur in these cases:
+        * - a lot of VRAM is freed by userspace
+        * - the presence of a big buffer causes a lot of evictions
+        *   (solution: split buffers into smaller ones)
         *
-        * Also, things can get pretty crazy under memory pressure and actual
-        * VRAM usage can change a lot, so playing safe even at 50% does
-        * consistently increase performance.
+        * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
+        * accum_us to a positive number.
+        */
+       if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
+               s64 min_us;
+
+               /* Be more aggressive on dGPUs. Try to fill a portion of free
+                * VRAM now.
+                */
+               if (!(adev->flags & AMD_IS_APU))
+                       min_us = bytes_to_us(adev, free_vram / 4);
+               else
+                       min_us = 0; /* Reset accum_us on APUs. */
+
+               adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
+       }
+
+       /* This is 0 when the driver is in debt, which disallows (optional)
+        * buffer moves.
         */
+       max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 
-       u64 half_vram = real_vram_size >> 1;
-       u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
-       u64 bytes_moved_threshold = half_free_vram >> 1;
-       return max(bytes_moved_threshold, 1024*1024ull);
+       spin_unlock(&adev->mm_stats.lock);
+       return max_bytes;
+}
+
+/* Report how many bytes have really been moved for the last command
+ * submission. This can result in a debt that can stop buffer migrations
+ * temporarily.
+ */
+static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
+                                        u64 num_bytes)
+{
+       spin_lock(&adev->mm_stats.lock);
+       adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+       spin_unlock(&adev->mm_stats.lock);
 }
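
Taken together, amdgpu_cs_get_threshold_for_moves() hands each submission a byte budget and amdgpu_cs_report_moved_bytes() charges the actual moves against it; one oversized move drives accum_us negative, and further optional moves stall until the clock repays the debt. A toy user-space simulation of that behaviour, assuming a fixed shift of 3 (8 MB/s) and ignoring locking:

    /* Sketch: debt-based throttling, not driver code. */
    static long long accum_us;

    static unsigned long long threshold(long long elapsed_us)
    {
            accum_us += elapsed_us;
            if (accum_us > 200000)          /* 200 ms cap */
                    accum_us = 200000;
            return accum_us > 0 ? (unsigned long long)accum_us << 3 : 0;
    }

    static void report_moved(unsigned long long bytes)
    {
            accum_us -= bytes >> 3;         /* big moves leave a debt */
    }
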
 
 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
@@ -297,15 +362,10 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
        if (bo->pin_count)
                return 0;
 
-       /* Avoid moving this one if we have moved too many buffers
-        * for this IB already.
-        *
-        * Note that this allows moving at least one buffer of
-        * any size, because it doesn't take the current "bo"
-        * into account. We don't want to disallow buffer moves
-        * completely.
+       /* Don't move this buffer if we have depleted our allowance
+        * to move it. Don't move anything if the threshold is zero.
         */
-       if (p->bytes_moved <= p->bytes_moved_threshold)
+       if (p->bytes_moved < p->bytes_moved_threshold)
                domain = bo->prefered_domains;
        else
                domain = bo->allowed_domains;
@@ -317,17 +377,67 @@ retry:
        p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
                initial_bytes_moved;
 
-       if (unlikely(r)) {
-               if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
-                       domain = bo->allowed_domains;
-                       goto retry;
-               }
+       if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+               domain = bo->allowed_domains;
+               goto retry;
        }
 
        return r;
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+/* Last resort, try to evict something from the current working set */
+static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+                               struct amdgpu_bo_list_entry *lobj)
+{
+       uint32_t domain = lobj->robj->allowed_domains;
+       int r;
+
+       if (!p->evictable)
+               return false;
+
+       for (; &p->evictable->tv.head != &p->validated;
+            p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+               struct amdgpu_bo_list_entry *candidate = p->evictable;
+               struct amdgpu_bo *bo = candidate->robj;
+               u64 initial_bytes_moved;
+               uint32_t other;
+
+               /* If we reached our current BO, we can forget it */
+               if (candidate == lobj)
+                       break;
+
+               other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+               /* Check if this BO is in one of the domains we need space for */
+               if (!(other & domain))
+                       continue;
+
+               /* Check if we can move this BO somewhere else */
+               other = bo->allowed_domains & ~domain;
+               if (!other)
+                       continue;
+
+               /* Good, we can try to move this BO somewhere else */
+               amdgpu_ttm_placement_from_domain(bo, other);
+               initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+               p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+                       initial_bytes_moved;
+
+               if (unlikely(r))
+                       break;
+
+               p->evictable = list_prev_entry(p->evictable, tv.head);
+               list_move(&candidate->tv.head, &p->validated);
+
+               return true;
+       }
+
+       return false;
+}
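
The two domain tests inside the loop first require that the candidate currently occupies one of the domains we need space in, and then that its allowed_domains leave it somewhere else to go. With concrete values (a sketch using the standard AMDGPU_GEM_DOMAIN_* flags):

    /* Sketch: candidate lives in VRAM, we need VRAM, and the candidate
     * is also allowed in GTT, so it can be evicted VRAM -> GTT. */
    uint32_t domain  = AMDGPU_GEM_DOMAIN_VRAM;  /* where space is needed */
    uint32_t cur     = AMDGPU_GEM_DOMAIN_VRAM;  /* candidate's placement */
    uint32_t allowed = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
    uint32_t other   = allowed & ~domain;       /* == AMDGPU_GEM_DOMAIN_GTT */

    if ((cur & domain) && other)
            /* move the candidate to 'other' to free up 'domain' */;
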
+
+static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                            struct list_head *validated)
 {
        struct amdgpu_bo_list_entry *lobj;
@@ -351,9 +461,15 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                        binding_userptr = true;
                }
 
-               r = amdgpu_cs_bo_validate(p, bo);
+               if (p->evictable == lobj)
+                       p->evictable = NULL;
+
+               do {
+                       r = amdgpu_cs_bo_validate(p, bo);
+               } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
                if (r)
                        return r;
+
                if (bo->shadow) {
                        r = amdgpu_cs_bo_validate(p, bo);
                        if (r)
@@ -481,6 +597,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
        p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
        p->bytes_moved = 0;
+       p->evictable = list_last_entry(&p->validated,
+                                      struct amdgpu_bo_list_entry,
+                                      tv.head);
 
        r = amdgpu_cs_list_validate(p, &duplicates);
        if (r) {
@@ -494,6 +613,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                goto error_validate;
        }
 
+       amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
+
        fpriv->vm.last_eviction_counter =
                atomic64_read(&p->adev->num_evictions);
 
@@ -524,8 +645,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                }
        }
 
-       if (p->uf_entry.robj)
-               p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+       if (!r && p->uf_entry.robj) {
+               struct amdgpu_bo *uf = p->uf_entry.robj;
+
+               r = amdgpu_ttm_bind(uf->tbo.ttm, &uf->tbo.mem);
+               p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+       }
 
 error_validate:
        if (r) {
@@ -735,6 +860,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                if (r)
                        return r;
 
+               if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+                       parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+                       if (!parser->ctx->preamble_presented) {
+                               parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+                               parser->ctx->preamble_presented = true;
+                       }
+               }
+
                if (parser->job->ring && parser->job->ring != ring)
                        return -EINVAL;
 
@@ -874,7 +1007,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        }
 
        job->owner = p->filp;
-       job->ctx = entity->fence_context;
+       job->fence_ctx = entity->fence_context;
        p->fence = fence_get(&job->base.s_fence->finished);
        cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
        job->uf_sequence = cs->out.handle;
@@ -1040,3 +1173,29 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 
        return NULL;
 }
+
+/**
+ * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
+ *
+ * @parser: command submission parser context
+ *
+ * Helper for UVD/VCE VM emulation; makes sure BOs are accessible by the system VM.
+ */
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
+{
+       unsigned i;
+       int r;
+
+       if (!parser->bo_list)
+               return 0;
+
+       for (i = 0; i < parser->bo_list->num_entries; i++) {
+               struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+
+               r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+               if (unlikely(r))
+                       return r;
+       }
+
+       return 0;
+}
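
A caller in the UVD/VCE IB-parsing path would run the helper before dereferencing BO contents through the system VM; a hypothetical call site (illustrative, not from this patch):

    /* Sketch: bind every BO in the list to GART before VM-emulation
     * parsing touches it. */
    r = amdgpu_cs_sysvm_access_required(parser);
    if (r)
            return r;
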
index c38dc47..3ddae5f 100644 (file)
@@ -41,6 +41,9 @@
 #include "atom.h"
 #include "amdgpu_atombios.h"
 #include "amd_pcie.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "si.h"
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
@@ -52,6 +55,11 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
 
 static const char *amdgpu_asic_name[] = {
+       "TAHITI",
+       "PITCAIRN",
+       "VERDE",
+       "OLAND",
+       "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
@@ -1027,7 +1035,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-               amdgpu_resume_kms(dev, true, true);
+               amdgpu_device_resume(dev, true, true);
 
                dev->pdev->d3_delay = d3_delay;
 
@@ -1037,7 +1045,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
                printk(KERN_INFO "amdgpu: switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               amdgpu_suspend_kms(dev, true, true);
+               amdgpu_device_suspend(dev, true, true);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
 }
@@ -1231,6 +1239,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                if (r)
                        return r;
                break;
+#ifdef CONFIG_DRM_AMDGPU_SI
+       case CHIP_VERDE:
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_OLAND:
+       case CHIP_HAINAN:
+               adev->family = AMDGPU_FAMILY_SI;
+               r = si_set_ip_blocks(adev);
+               if (r)
+                       return r;
+               break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
@@ -1347,6 +1367,9 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_block_status[i].valid)
                        continue;
+               if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
+                       adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
+                       continue;
                /* enable clockgating to save power */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                    AMD_CG_STATE_GATE);
@@ -1490,6 +1513,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 {
        int r, i;
        bool runtime = false;
+       u32 max_MBps;
 
        adev->shutdown = false;
        adev->dev = &pdev->dev;
@@ -1513,6 +1537,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->smc_wreg = &amdgpu_invalid_wreg;
        adev->pcie_rreg = &amdgpu_invalid_rreg;
        adev->pcie_wreg = &amdgpu_invalid_wreg;
+       adev->pciep_rreg = &amdgpu_invalid_rreg;
+       adev->pciep_wreg = &amdgpu_invalid_wreg;
        adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1549,12 +1575,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        spin_lock_init(&adev->didt_idx_lock);
        spin_lock_init(&adev->gc_cac_idx_lock);
        spin_lock_init(&adev->audio_endpt_idx_lock);
+       spin_lock_init(&adev->mm_stats.lock);
 
        INIT_LIST_HEAD(&adev->shadow_list);
        mutex_init(&adev->shadow_list_lock);
 
-       adev->rmmio_base = pci_resource_start(adev->pdev, 5);
-       adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+       INIT_LIST_HEAD(&adev->gtt_list);
+       spin_lock_init(&adev->gtt_list_lock);
+
+       if (adev->asic_type >= CHIP_BONAIRE) {
+               adev->rmmio_base = pci_resource_start(adev->pdev, 5);
+               adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+       } else {
+               adev->rmmio_base = pci_resource_start(adev->pdev, 2);
+               adev->rmmio_size = pci_resource_len(adev->pdev, 2);
+       }
+
        adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
        if (adev->rmmio == NULL) {
                return -ENOMEM;
@@ -1562,8 +1598,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-       /* doorbell bar mapping */
-       amdgpu_doorbell_init(adev);
+       if (adev->asic_type >= CHIP_BONAIRE)
+               /* doorbell bar mapping */
+               amdgpu_doorbell_init(adev);
 
        /* io port mapping */
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1660,6 +1697,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
        adev->accel_working = true;
 
+       /* Initialize the buffer migration limit. */
+       if (amdgpu_moverate >= 0)
+               max_MBps = amdgpu_moverate;
+       else
+               max_MBps = 8; /* Allow 8 MB/s. */
+       /* Get a log2 for easy divisions. */
+       adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+
        amdgpu_fbdev_init(adev);
 
        r = amdgpu_ib_pool_init(adev);
@@ -1764,7 +1809,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        adev->rio_mem = NULL;
        iounmap(adev->rmmio);
        adev->rmmio = NULL;
-       amdgpu_doorbell_fini(adev);
+       if (adev->asic_type >= CHIP_BONAIRE)
+               amdgpu_doorbell_fini(adev);
        amdgpu_debugfs_regs_cleanup(adev);
        amdgpu_debugfs_remove_files(adev);
 }
@@ -1774,7 +1820,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  * Suspend & resume.
  */
 /**
- * amdgpu_suspend_kms - initiate device suspend
+ * amdgpu_device_suspend - initiate device suspend
  *
  * @dev: drm dev pointer
  * @suspend: suspend state
@@ -1783,7 +1829,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 {
        struct amdgpu_device *adev;
        struct drm_crtc *crtc;
@@ -1796,7 +1842,8 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 
        adev = dev->dev_private;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+           dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
                return 0;
 
        drm_kms_helper_poll_disable(dev);
@@ -1851,6 +1898,10 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
+       } else {
+               r = amdgpu_asic_reset(adev);
+               if (r)
+                       DRM_ERROR("amdgpu asic reset failed\n");
        }
 
        if (fbcon) {
@@ -1862,7 +1913,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 }
 
 /**
- * amdgpu_resume_kms - initiate device resume
+ * amdgpu_device_resume - initiate device resume
  *
  * @dev: drm dev pointer
  *
@@ -1870,32 +1921,37 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 {
        struct drm_connector *connector;
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_crtc *crtc;
        int r;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+           dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
                return 0;
 
-       if (fbcon) {
+       if (fbcon)
                console_lock();
-       }
+
        if (resume) {
                pci_set_power_state(dev->pdev, PCI_D0);
                pci_restore_state(dev->pdev);
-               if (pci_enable_device(dev->pdev)) {
+               r = pci_enable_device(dev->pdev);
+               if (r) {
                        if (fbcon)
                                console_unlock();
-                       return -1;
+                       return r;
                }
        }
 
        /* post card */
-       if (!amdgpu_card_posted(adev))
-               amdgpu_atom_asic_init(adev->mode_info.atom_context);
+       if (!amdgpu_card_posted(adev) || !resume) {
+               r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+               if (r)
+                       DRM_ERROR("amdgpu asic init failed\n");
+       }
 
        r = amdgpu_resume(adev);
        if (r)
@@ -2163,6 +2219,11 @@ retry:
        }
        if (!r) {
                amdgpu_irq_gpu_reset_resume_helper(adev);
+               if (need_full_reset && amdgpu_need_backup(adev)) {
+                       r = amdgpu_ttm_recover_gart(adev);
+                       if (r)
+                               DRM_ERROR("gart recovery failed!!!\n");
+               }
                r = amdgpu_ib_ring_tests(adev);
                if (r) {
                        dev_err(adev->dev, "ib ring test failed (%d).\n", r);
@@ -2600,7 +2661,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
        while (size) {
                uint32_t value;
 
-               value = RREG32_SMC(*pos >> 2);
+               value = RREG32_SMC(*pos);
                r = put_user(value, (uint32_t *)buf);
                if (r)
                        return r;
@@ -2631,7 +2692,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
                if (r)
                        return r;
 
-               WREG32_SMC(*pos >> 2, value);
+               WREG32_SMC(*pos, value);
 
                result += 4;
                buf += 4;
index 7c911d0..5963626 100644 (file)
  * - 3.3.0 - Add VM support for UVD on supported hardware.
  * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
  * - 3.5.0 - Add support for new UVD_NO_OP register.
+ * - 3.6.0 - KMD makes use of CONTEXT_CONTROL in the ring buffer.
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       5
+#define KMS_DRIVER_MINOR       6
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
 int amdgpu_gart_size = -1; /* auto */
+int amdgpu_moverate = -1; /* auto */
 int amdgpu_benchmarking = 0;
 int amdgpu_testing = 0;
 int amdgpu_audio = -1;
@@ -93,6 +95,7 @@ unsigned amdgpu_cg_mask = 0xffffffff;
 unsigned amdgpu_pg_mask = 0xffffffff;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
+unsigned amdgpu_pp_feature_mask = 0xffffffff;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -100,6 +103,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
 MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
 module_param_named(gartsize, amdgpu_gart_size, int, 0600);
 
+MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0 or 1=disabled)");
+module_param_named(moverate, amdgpu_moverate, int, 0600);
+
 MODULE_PARM_DESC(benchmark, "Run benchmark");
 module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
 
@@ -172,6 +178,9 @@ module_param_named(powerplay, amdgpu_powerplay, int, 0444);
 
 MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
 module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
+
+MODULE_PARM_DESC(ppfeaturemask, "PowerPlay feature mask (0xffffffff = all features enabled (default))");
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
 #endif
 
 MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
@@ -196,6 +205,80 @@ MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_d
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
 
 static const struct pci_device_id pciidlist[] = {
+#ifdef CONFIG_DRM_AMDGPU_SI
+       {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+       {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+       {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+       {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+       {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+       {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+       {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+       {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+       {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+       {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+       {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
        /* Kaveri */
        {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -393,32 +476,72 @@ amdgpu_pci_remove(struct pci_dev *pdev)
        drm_put_dev(dev);
 }
 
+static void
+amdgpu_pci_shutdown(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = dev->dev_private;
+
+       /* if we are running in a VM, make sure the device
+        * is torn down properly on reboot/shutdown
+        */
+       if (adev->virtualization.is_virtual)
+               amdgpu_pci_remove(pdev);
+}
+
 static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_suspend_kms(drm_dev, true, true);
+       return amdgpu_device_suspend(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_resume_kms(drm_dev, true, true);
+
+       /* GPU comes up enabled by the bios on resume */
+       if (amdgpu_device_is_px(drm_dev)) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
+
+       return amdgpu_device_resume(drm_dev, true, true);
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_suspend_kms(drm_dev, false, true);
+       return amdgpu_device_suspend(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_thaw(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       return amdgpu_device_resume(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_poweroff(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       return amdgpu_device_suspend(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_restore(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       return amdgpu_resume_kms(drm_dev, false, true);
+       return amdgpu_device_resume(drm_dev, false, true);
 }
 
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -436,7 +559,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        drm_kms_helper_poll_disable(drm_dev);
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 
-       ret = amdgpu_suspend_kms(drm_dev, false, false);
+       ret = amdgpu_device_suspend(drm_dev, false, false);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_ignore_hotplug(pdev);
@@ -469,7 +592,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
                return ret;
        pci_set_master(pdev);
 
-       ret = amdgpu_resume_kms(drm_dev, false, false);
+       ret = amdgpu_device_resume(drm_dev, false, false);
        drm_kms_helper_poll_enable(drm_dev);
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
@@ -523,8 +646,8 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .resume = amdgpu_pmops_resume,
        .freeze = amdgpu_pmops_freeze,
        .thaw = amdgpu_pmops_thaw,
-       .poweroff = amdgpu_pmops_freeze,
-       .restore = amdgpu_pmops_resume,
+       .poweroff = amdgpu_pmops_poweroff,
+       .restore = amdgpu_pmops_restore,
        .runtime_suspend = amdgpu_pmops_runtime_suspend,
        .runtime_resume = amdgpu_pmops_runtime_resume,
        .runtime_idle = amdgpu_pmops_runtime_idle,
@@ -606,6 +729,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
        .id_table = pciidlist,
        .probe = amdgpu_pci_probe,
        .remove = amdgpu_pci_remove,
+       .shutdown = amdgpu_pci_shutdown,
        .driver.pm = &amdgpu_pm_ops,
 };
 
index bf033b5..107fbb2 100644 (file)
@@ -25,6 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -47,8 +48,35 @@ struct amdgpu_fbdev {
        struct amdgpu_device *adev;
 };
 
+static int
+amdgpufb_open(struct fb_info *info, int user)
+{
+       struct amdgpu_fbdev *rfbdev = info->par;
+       struct amdgpu_device *adev = rfbdev->adev;
+       int ret = pm_runtime_get_sync(adev->ddev->dev);
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_mark_last_busy(adev->ddev->dev);
+               pm_runtime_put_autosuspend(adev->ddev->dev);
+               return ret;
+       }
+       return 0;
+}
+
+static int
+amdgpufb_release(struct fb_info *info, int user)
+{
+       struct amdgpu_fbdev *rfbdev = info->par;
+       struct amdgpu_device *adev = rfbdev->adev;
+
+       pm_runtime_mark_last_busy(adev->ddev->dev);
+       pm_runtime_put_autosuspend(adev->ddev->dev);
+       return 0;
+}
+
 static struct fb_ops amdgpufb_ops = {
        .owner = THIS_MODULE,
+       .fb_open = amdgpufb_open,
+       .fb_release = amdgpufb_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
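
The new open/release pair holds a runtime-PM reference for as long as the fbdev node is open, so the GPU cannot autosuspend underneath fbcon; -EACCES is tolerated because it only means runtime PM is disabled for the device. The same pattern against a generic struct device (a sketch with hypothetical helpers, not driver code):

    /* Sketch: keep the device awake across open/release. */
    static int example_open(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);

            if (ret < 0 && ret != -EACCES) {
                    pm_runtime_put_autosuspend(dev); /* drop the failed ref */
                    return ret;
            }
            return 0;
    }

    static void example_release(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
    }
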
index f5810f7..4127e7c 100644 (file)
@@ -124,7 +124,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
-       uint64_t ctx;
+       uint64_t fence_ctx;
+       uint32_t status = 0, alloc_size;
 
        unsigned i;
        int r = 0;
@@ -135,10 +136,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
-               ctx = job->ctx;
+               fence_ctx = job->fence_ctx;
        } else {
                vm = NULL;
-               ctx = 0;
+               fence_ctx = 0;
        }
 
        if (!ring->ready) {
@@ -151,7 +152,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                return -EINVAL;
        }
 
-       r = amdgpu_ring_alloc(ring, 256 * num_ibs);
+       alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
+               num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+
+       r = amdgpu_ring_alloc(ring, alloc_size);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
@@ -174,13 +178,22 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;
 
-       skip_preamble = ring->current_ctx == ctx;
-       need_ctx_switch = ring->current_ctx != ctx;
+       skip_preamble = ring->current_ctx == fence_ctx;
+       need_ctx_switch = ring->current_ctx != fence_ctx;
+       if (job && ring->funcs->emit_cntxcntl) {
+               if (need_ctx_switch)
+                       status |= AMDGPU_HAVE_CTX_SWITCH;
+               status |= job->preamble_status;
+               amdgpu_ring_emit_cntxcntl(ring, status);
+       }
+
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
                /* drop preamble IBs if we don't have a context switch */
-               if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+               if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
+                       skip_preamble &&
+                       !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
                        continue;
 
                amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -209,7 +222,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-       ring->current_ctx = ctx;
+       ring->current_ctx = fence_ctx;
+       if (ring->funcs->emit_switch_buffer)
+               amdgpu_ring_emit_switch_buffer(ring);
        amdgpu_ring_commit(ring);
        return 0;
 }
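
Two details above are easy to miss: ring space is now reserved from per-ring worst-case sizes rather than a flat 256 dwords per IB, and a preamble IB is only skipped when the ring stays in the same fence context and no first-time preamble is pending. Condensed from the hunks above into one place (a sketch, same flag names as the diff):

    /* Sketch: status word and preamble-skip decision for one job. */
    uint32_t status = job->preamble_status; /* AMDGPU_PREAMBLE_IB_PRESENT[_FIRST] */

    if (ring->current_ctx != job->fence_ctx)
            status |= AMDGPU_HAVE_CTX_SWITCH;

    bool skip = (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
                ring->current_ctx == job->fence_ctx &&
                !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST);
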
index 5ebb3f4..3ab4c65 100644 (file)
@@ -119,8 +119,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
  */
 void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
 {
-       int r;
-
        if (adev->irq.ih.use_bus_addr) {
                if (adev->irq.ih.ring) {
                        /* add 8 bytes for the rptr/wptr shadows and
@@ -132,17 +130,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
                        adev->irq.ih.ring = NULL;
                }
        } else {
-               if (adev->irq.ih.ring_obj) {
-                       r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
-                       if (likely(r == 0)) {
-                               amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
-                               amdgpu_bo_unpin(adev->irq.ih.ring_obj);
-                               amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
-                       }
-                       amdgpu_bo_unref(&adev->irq.ih.ring_obj);
-                       adev->irq.ih.ring = NULL;
-                       adev->irq.ih.ring_obj = NULL;
-               }
+               amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
+                                     &adev->irq.ih.gpu_addr,
+                                     (void **)&adev->irq.ih.ring);
                amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
                amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
        }
index 6674d40..8c58079 100644 (file)
@@ -91,7 +91,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
                amdgpu_ib_free(job->adev, &job->ibs[i], f);
 }
 
-void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
 {
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
@@ -124,7 +124,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                return r;
 
        job->owner = owner;
-       job->ctx = entity->fence_context;
+       job->fence_ctx = entity->fence_context;
        *f = fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);
index b78e740..c2c7fb1 100644 (file)
@@ -296,7 +296,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        break;
                case AMDGPU_HW_IP_VCE:
                        type = AMD_IP_BLOCK_TYPE_VCE;
-                       for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
+                       for (i = 0; i < adev->vce.num_rings; i++)
                                ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
                        ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
                        ib_size_alignment = 1;
@@ -542,12 +542,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                return r;
 
        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-       if (unlikely(!fpriv))
-               return -ENOMEM;
+       if (unlikely(!fpriv)) {
+               r = -ENOMEM;
+               goto out_suspend;
+       }
 
        r = amdgpu_vm_init(adev, &fpriv->vm);
-       if (r)
-               goto error_free;
+       if (r) {
+               kfree(fpriv);
+               goto out_suspend;
+       }
 
        mutex_init(&fpriv->bo_list_lock);
        idr_init(&fpriv->bo_list_handles);
@@ -556,12 +560,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 
        file_priv->driver_priv = fpriv;
 
+out_suspend:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
-       return 0;
-
-error_free:
-       kfree(fpriv);
 
        return r;
 }
@@ -600,6 +601,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 
        kfree(fpriv);
        file_priv->driver_priv = NULL;
+
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -614,6 +618,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv)
 {
+       pm_runtime_get_sync(dev->dev);
 }
 
 /*
index b17734e..428aa00 100644 (file)
@@ -38,8 +38,6 @@
 #include "amdgpu_trace.h"
 
 
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
 
 static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
                                                struct ttm_mem_reg *mem)
@@ -287,6 +285,35 @@ error_free:
        return r;
 }
 
+/**
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+ * @bo: amdgpu BO to free
+ * @gpu_addr: GPU address of the BO, zeroed on free (may be NULL)
+ * @cpu_addr: kernel mapping of the BO, unmapped and cleared on free (may be NULL)
+ *
+ * Unmaps and unpins a BO for kernel internal use.
+ */
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+                          void **cpu_addr)
+{
+       if (*bo == NULL)
+               return;
+
+       if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+               if (cpu_addr)
+                       amdgpu_bo_kunmap(*bo);
+
+               amdgpu_bo_unpin(*bo);
+               amdgpu_bo_unreserve(*bo);
+       }
+       amdgpu_bo_unref(bo);
+
+       if (gpu_addr)
+               *gpu_addr = 0;
+
+       if (cpu_addr)
+               *cpu_addr = NULL;
+}
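
The helper is the teardown counterpart of amdgpu_bo_create_kernel() (declared in amdgpu_object.h below), and it is what the IH-ring and GFX-ring teardown above now use instead of open-coded reserve/kunmap/unpin sequences. Paired usage, with size and domain purely illustrative:

    /* Sketch: create a pinned, kernel-mapped BO, then free it again. */
    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_GTT,
                                &bo, &gpu_addr, &cpu_addr);
    if (r)
            return r;

    /* ... use cpu_addr and gpu_addr ... */

    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr); /* clears all three */
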
+
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
@@ -646,6 +673,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
                goto error;
        }
+       r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+       if (unlikely(r)) {
+               dev_err(bo->adev->dev, "%p bind failed\n", bo);
+               goto error;
+       }
 
        bo->pin_count = 1;
        if (gpu_addr != NULL)
@@ -692,7 +724,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
                bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
-       } else {
+       } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
        }
 
@@ -918,8 +950,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+                    !amdgpu_ttm_is_bound(bo->tbo.ttm));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
+       WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
 
        return bo->tbo.offset;
 }
index b6a2739..8255034 100644 (file)
@@ -31,6 +31,8 @@
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 
+#define AMDGPU_BO_INVALID_OFFSET       LONG_MAX
+
 /**
  * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
  * @mem_type:  ttm memory type
@@ -128,6 +130,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr);
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+                          void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
index d153149..8e67c12 100644 (file)
@@ -25,6 +25,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "atombios_encoders.h"
+#include "amdgpu_pll.h"
 #include <asm/div64.h>
 #include <linux/gcd.h>
 
index 5450744..1e7f160 100644 (file)
@@ -30,6 +30,7 @@
 #include "amdgpu_pm.h"
 #include <drm/amdgpu_drm.h>
 #include "amdgpu_powerplay.h"
+#include "si_dpm.h"
 #include "cik_dpm.h"
 #include "vi_dpm.h"
 
@@ -52,10 +53,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
                pp_init->chip_family = adev->family;
                pp_init->chip_id = adev->asic_type;
                pp_init->device = amdgpu_cgs_create_device(adev);
-               pp_init->rev_id = adev->pdev->revision;
-               pp_init->sub_sys_id = adev->pdev->subsystem_device;
-               pp_init->sub_vendor_id = adev->pdev->subsystem_vendor;
-
                ret = amd_powerplay_init(pp_init, amd_pp);
                kfree(pp_init);
 #endif
@@ -63,6 +60,15 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
                amd_pp->pp_handle = (void *)adev;
 
                switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+               case CHIP_TAHITI:
+               case CHIP_PITCAIRN:
+               case CHIP_VERDE:
+               case CHIP_OLAND:
+               case CHIP_HAINAN:
+                       amd_pp->ip_funcs = &si_dpm_ip_funcs;
+                       break;
+#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
                case CHIP_BONAIRE:
                case CHIP_HAWAII:
index 242ba04..777f11b 100644 (file)
@@ -252,28 +252,17 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-       int r;
-       struct amdgpu_bo *ring_obj;
-
-       ring_obj = ring->ring_obj;
        ring->ready = false;
-       ring->ring = NULL;
-       ring->ring_obj = NULL;
 
        amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
        amdgpu_wb_free(ring->adev, ring->fence_offs);
        amdgpu_wb_free(ring->adev, ring->rptr_offs);
        amdgpu_wb_free(ring->adev, ring->wptr_offs);
 
-       if (ring_obj) {
-               r = amdgpu_bo_reserve(ring_obj, false);
-               if (likely(r == 0)) {
-                       amdgpu_bo_kunmap(ring_obj);
-                       amdgpu_bo_unpin(ring_obj);
-                       amdgpu_bo_unreserve(ring_obj);
-               }
-               amdgpu_bo_unref(&ring_obj);
-       }
+       amdgpu_bo_free_kernel(&ring->ring_obj,
+                             &ring->gpu_addr,
+                             (void **)&ring->ring);
+
        amdgpu_debugfs_ring_fini(ring);
 }
 
index 5447973..b63969d 100644 (file)
@@ -89,10 +89,10 @@ int amdgpu_ttm_global_init(struct amdgpu_device *adev)
        global_ref->init = &amdgpu_ttm_mem_global_init;
        global_ref->release = &amdgpu_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
-       if (r != 0) {
+       if (r) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
-               return r;
+               goto error_mem;
        }
 
        adev->mman.bo_global_ref.mem_glob =
@@ -103,26 +103,30 @@ int amdgpu_ttm_global_init(struct amdgpu_device *adev)
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
-       if (r != 0) {
+       if (r) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
-               drm_global_item_unref(&adev->mman.mem_global_ref);
-               return r;
+               goto error_bo;
        }
 
        ring = adev->mman.buffer_funcs_ring;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
        r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
                                  rq, amdgpu_sched_jobs);
-       if (r != 0) {
+       if (r) {
                DRM_ERROR("Failed setting up TTM BO move run queue.\n");
-               drm_global_item_unref(&adev->mman.mem_global_ref);
-               drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-               return r;
+               goto error_entity;
        }
 
        adev->mman.mem_global_referenced = true;
 
        return 0;
+
+error_entity:
+       drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+error_bo:
+       drm_global_item_unref(&adev->mman.mem_global_ref);
+error_mem:
+       return r;
 }
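
The rewritten error handling turns per-branch cleanup into the usual kernel goto-unwind ladder: each failure jumps to a label that releases only what has already been acquired, in reverse order. The skeleton of the idiom (a sketch with generic names):

    /* Sketch: goto-based unwind; the newest resource is released first. */
    r = acquire_a();
    if (r)
            goto error_a;       /* nothing acquired yet */

    r = acquire_b();
    if (r)
            goto error_b;       /* only 'a' to undo */

    return 0;

    error_b:
            release_a();
    error_a:
            return r;
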
 
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
@@ -197,6 +201,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                .lpfn = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
        };
+       unsigned i;
 
        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
                placement->placement = &placements;
@@ -208,10 +213,25 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
        rbo = container_of(bo, struct amdgpu_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (rbo->adev->mman.buffer_funcs_ring->ready == false)
+               if (rbo->adev->mman.buffer_funcs_ring->ready == false) {
                        amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
-               else
+               } else {
                        amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+                       for (i = 0; i < rbo->placement.num_placement; ++i) {
+                               if (!(rbo->placements[i].flags &
+                                     TTM_PL_FLAG_TT))
+                                       continue;
+
+                               if (rbo->placements[i].lpfn)
+                                       continue;
+
+                               /* set an upper limit to force direct
+                                * allocation of GTT address space for
+                                * the BO.
+                                */
+                               rbo->placements[i].lpfn =
+                                       rbo->adev->mc.gtt_size >> PAGE_SHIFT;
+                       }
+               }
                break;
        case TTM_PL_TT:
        default:
@@ -226,7 +246,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
        if (amdgpu_ttm_tt_get_usermm(bo->ttm))
                return -EPERM;
-       return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+       return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+                                         filp->private_data);
 }
 
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
@@ -256,8 +277,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        new_start = new_mem->start << PAGE_SHIFT;
 
        switch (old_mem->mem_type) {
-       case TTM_PL_VRAM:
        case TTM_PL_TT:
+               r = amdgpu_ttm_bind(bo->ttm, old_mem);
+               if (r)
+                       return r;
+
+       case TTM_PL_VRAM:
                old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
                break;
        default:
@@ -265,8 +290,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
-       case TTM_PL_VRAM:
        case TTM_PL_TT:
+               r = amdgpu_ttm_bind(bo->ttm, new_mem);
+               if (r)
+                       return r;
+
+       case TTM_PL_VRAM:
                new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
                break;
        default:
@@ -311,7 +340,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
-       placements.lpfn = 0;
+       placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
@@ -358,7 +387,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
-       placements.lpfn = 0;
+       placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
                             interruptible, no_wait_gpu);
@@ -520,6 +549,7 @@ struct amdgpu_ttm_tt {
        spinlock_t              guptasklock;
        struct list_head        guptasks;
        atomic_t                mmu_invalidations;
+       struct list_head        list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -637,7 +667,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
 {
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
-       uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        int r;
 
        if (gtt->userptr) {
@@ -647,7 +676,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                        return r;
                }
        }
-       gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+       gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
@@ -658,14 +687,62 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
            bo_mem->mem_type == AMDGPU_PL_OA)
                return -EINVAL;
 
+       return 0;
+}
+
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+       return gtt && !list_empty(&gtt->list);
+}
+
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       uint32_t flags;
+       int r;
+
+       if (!ttm || amdgpu_ttm_is_bound(ttm))
+               return 0;
+
+       flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
                ttm->pages, gtt->ttm.dma_address, flags);
 
        if (r) {
-               DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
-                         ttm->num_pages, (unsigned)gtt->offset);
+               DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+                         ttm->num_pages, gtt->offset);
                return r;
        }
+       spin_lock(&gtt->adev->gtt_list_lock);
+       list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+       spin_unlock(&gtt->adev->gtt_list_lock);
+       return 0;
+}
+
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+{
+       struct amdgpu_ttm_tt *gtt, *tmp;
+       struct ttm_mem_reg bo_mem;
+       uint32_t flags;
+       int r;
+
+       bo_mem.mem_type = TTM_PL_TT;
+       spin_lock(&adev->gtt_list_lock);
+       list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+               flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+               r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+                                    gtt->ttm.ttm.pages, gtt->ttm.dma_address,
+                                    flags);
+               if (r) {
+                       spin_unlock(&adev->gtt_list_lock);
+                       DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+                                 gtt->ttm.ttm.num_pages, gtt->offset);
+                       return r;
+               }
+       }
+       spin_unlock(&adev->gtt_list_lock);
        return 0;
 }
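
Taken together, the functions above split binding into two stages: amdgpu_ttm_backend_bind() only pins user pages and records the GTT offset, while amdgpu_ttm_bind() programs the GART on first real use and links the tt onto adev->gtt_list under gtt_list_lock. Because every live mapping is on that list, amdgpu_ttm_recover_gart() can replay all of them after a GPU reset clears the GART. A condensed sketch of the same pattern, using hypothetical types rather than the driver's:

    /* sketch of list-tracked lazy binding; types are illustrative only */
    #include <pthread.h>
    #include <stddef.h>

    struct tt {
            struct tt *next;
            unsigned long offset;
            int bound;
    };

    static struct tt *bound_list;   /* plays the role of adev->gtt_list */
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void program_gart(struct tt *t) { (void)t; /* write PTEs here */ }

    static void lazy_bind(struct tt *t)
    {
            if (t->bound)
                    return;          /* idempotent, like amdgpu_ttm_is_bound() */
            program_gart(t);
            pthread_mutex_lock(&list_lock);
            t->next = bound_list;
            bound_list = t;
            t->bound = 1;
            pthread_mutex_unlock(&list_lock);
    }

    static void recover_after_reset(void)
    {
            struct tt *t;

            pthread_mutex_lock(&list_lock);
            for (t = bound_list; t; t = t->next)
                    program_gart(t); /* replay every recorded mapping */
            pthread_mutex_unlock(&list_lock);
    }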
 
@@ -673,6 +750,9 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+       if (!amdgpu_ttm_is_bound(ttm))
+               return 0;
+
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
        if (gtt->adev->gart.ready)
                amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
@@ -680,6 +760,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
        if (gtt->userptr)
                amdgpu_ttm_tt_unpin_userptr(ttm);
 
+       spin_lock(&gtt->adev->gtt_list_lock);
+       list_del_init(&gtt->list);
+       spin_unlock(&gtt->adev->gtt_list_lock);
+
        return 0;
 }
 
@@ -716,6 +800,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
                kfree(gtt);
                return NULL;
        }
+       INIT_LIST_HEAD(&gtt->list);
        return &gtt->ttm.ttm;
 }
 
index 72f6bfc..3ee825f 100644 (file)
 
 #include "gpu_scheduler.h"
 
-#define AMDGPU_PL_GDS          TTM_PL_PRIV0
-#define AMDGPU_PL_GWS          TTM_PL_PRIV1
-#define AMDGPU_PL_OA           TTM_PL_PRIV2
+#define AMDGPU_PL_GDS          (TTM_PL_PRIV + 0)
+#define AMDGPU_PL_GWS          (TTM_PL_PRIV + 1)
+#define AMDGPU_PL_OA           (TTM_PL_PRIV + 2)
 
-#define AMDGPU_PL_FLAG_GDS             TTM_PL_FLAG_PRIV0
-#define AMDGPU_PL_FLAG_GWS             TTM_PL_FLAG_PRIV1
-#define AMDGPU_PL_FLAG_OA              TTM_PL_FLAG_PRIV2
+#define AMDGPU_PL_FLAG_GDS             (TTM_PL_FLAG_PRIV << 0)
+#define AMDGPU_PL_FLAG_GWS             (TTM_PL_FLAG_PRIV << 1)
+#define AMDGPU_PL_FLAG_OA              (TTM_PL_FLAG_PRIV << 2)
 
 #define AMDGPU_TTM_LRU_SIZE    20
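
The private placements are now derived from the TTM_PL_PRIV base rather than the fixed TTM_PL_PRIV0..2 names, so the GDS/GWS/OA pools renumber automatically if core TTM ever grows its built-in placements; each flag is the single bit whose position matches its placement index. A sketch of the invariant, with assumed values standing in for the real TTM constants:

    /* sketch: the base values below are assumptions, not TTM's definitions */
    #define TTM_PL_PRIV       3
    #define TTM_PL_FLAG_PRIV  (1u << 3)

    #define AMDGPU_PL_GDS      (TTM_PL_PRIV + 0)
    #define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)

    /* the scheme relies on the flag bit tracking the placement index */
    _Static_assert(AMDGPU_PL_FLAG_GDS == 1u << AMDGPU_PL_GDS,
                   "placement flag must match placement index");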
 
@@ -77,4 +77,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        struct fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
 #endif
index 5cc95f1..7a05f79 100644 (file)
@@ -247,35 +247,28 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
        const struct common_firmware_header *header = NULL;
 
        err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-                       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+                              AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
        if (err) {
                dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
-               err = -ENOMEM;
                goto failed;
        }
 
        err = amdgpu_bo_reserve(*bo, false);
        if (err) {
-               amdgpu_bo_unref(bo);
                dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
-               goto failed;
+               goto failed_reserve;
        }
 
        err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
        if (err) {
-               amdgpu_bo_unreserve(*bo);
-               amdgpu_bo_unref(bo);
                dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
-               goto failed;
+               goto failed_pin;
        }
 
        err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
        if (err) {
                dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
-               amdgpu_bo_unpin(*bo);
-               amdgpu_bo_unreserve(*bo);
-               amdgpu_bo_unref(bo);
-               goto failed;
+               goto failed_kmap;
        }
 
        amdgpu_bo_unreserve(*bo);
@@ -290,10 +283,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
                        fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
                }
        }
+       return 0;
 
+failed_kmap:
+       amdgpu_bo_unpin(*bo);
+failed_pin:
+       amdgpu_bo_unreserve(*bo);
+failed_reserve:
+       amdgpu_bo_unref(bo);
 failed:
-       if (err)
-               adev->firmware.smu_load = false;
+       adev->firmware.smu_load = false;
 
        return err;
 }
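
The rewrite above replaces four copies of the unwind sequence with the usual kernel goto ladder: each failure jumps to the label that releases everything acquired so far, in reverse order of acquisition, and the success path returns before reaching any label. The idiom, reduced to a self-contained sketch with stand-in resources:

    /* sketch of the goto-unwind idiom; pin()/kmap() are stand-ins */
    #include <stdlib.h>
    #include <errno.h>

    static int pin(void *p)  { return p ? 0 : -EBUSY; }
    static int kmap(void *p) { return p ? 0 : -EIO; }

    static int setup(void **out, size_t n)
    {
            void *mem;
            int err;

            mem = malloc(n);
            if (!mem)
                    return -ENOMEM;

            err = pin(mem);
            if (err)
                    goto failed_alloc;

            err = kmap(mem);
            if (err)
                    goto failed_pin;

            *out = mem;
            return 0;               /* success never falls into the labels */

    failed_pin:
            /* unpin(mem) would go here, mirroring the pin above */
    failed_alloc:
            free(mem);
            return err;
    }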
index cc766cc..25dd58a 100644 (file)
@@ -249,22 +249,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-       int r;
-
        kfree(adev->uvd.saved_bo);
 
        amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-       if (adev->uvd.vcpu_bo) {
-               r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-               if (!r) {
-                       amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-                       amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-                       amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-               }
-
-               amdgpu_bo_unref(&adev->uvd.vcpu_bo);
-       }
+       amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+                             &adev->uvd.gpu_addr,
+                             (void **)&adev->uvd.cpu_addr);
 
        amdgpu_ring_fini(&adev->uvd.ring);
 
@@ -891,6 +882,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
                return -EINVAL;
        }
 
+       r = amdgpu_cs_sysvm_access_required(parser);
+       if (r)
+               return r;
+
        ctx.parser = parser;
        ctx.buf_sizes = buf_sizes;
        ctx.ib_idx = ib_idx;
index da52af2..2c9ea9b 100644 (file)
@@ -634,7 +634,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
        uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
-       int i, r = 0, idx = 0;
+       int i, r, idx = 0;
+
+       r = amdgpu_cs_sysvm_access_required(p);
+       if (r)
+               return r;
 
        while (idx < ib->length_dw) {
                uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
@@ -799,6 +803,18 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
        amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               4; /* amdgpu_vce_ring_emit_ib */
+}
+
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6; /* amdgpu_vce_ring_emit_fence  x1 no user fence */
+}
+
 /**
  * amdgpu_vce_ring_test_ring - test if VCE ring is working
  *
@@ -850,8 +866,8 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        struct fence *fence = NULL;
        long r;
 
-       /* skip vce ring1 ib test for now, since it's not reliable */
-       if (ring == &ring->adev->vce.ring[1])
+       /* skip vce ring1/2 ib test for now, since it's not reliable */
+       if (ring != &ring->adev->vce.ring[0])
                return 0;
 
        r = amdgpu_vce_get_create_msg(ring, 1, NULL);
index 63f83d0..12729d2 100644 (file)
@@ -42,5 +42,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
 void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
 
 #endif
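
The two accessors above (with matching .get_emit_ib_size/.get_dma_frame_size hooks added to the SDMA ring funcs further down) let common code compute the worst-case ring space for a whole submission before writing a single dword. A hedged sketch of a caller: amdgpu_ring_alloc() is the existing ring API, but the sizing helper itself is hypothetical:

    /* hypothetical helper: reserve worst-case frame space up front */
    static int reserve_frame(struct amdgpu_ring *ring, unsigned num_ibs)
    {
            unsigned ndw = ring->funcs->get_dma_frame_size(ring) +
                           num_ibs * ring->funcs->get_emit_ib_size(ring);

            return amdgpu_ring_alloc(ring, ndw);
    }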
index bf56f18..bd5af32 100644 (file)
@@ -1163,7 +1163,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        }
 
        flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-       gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
+       gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+               adev == bo_va->bo->adev) ? flags : 0;
 
        spin_lock(&vm->status_lock);
        if (!list_empty(&bo_va->vm_status))
index 49a39b1..f7d236f 100644 (file)
@@ -497,7 +497,13 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
                         * SetPixelClock provides the dividers
                         */
                        args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
-                       args.v6.ucPpll = ATOM_EXT_PLL1;
+                       if (adev->asic_type == CHIP_TAHITI ||
+                           adev->asic_type == CHIP_PITCAIRN ||
+                           adev->asic_type == CHIP_VERDE ||
+                           adev->asic_type == CHIP_OLAND)
+                               args.v6.ucPpll = ATOM_PPLL0;
+                       else
+                               args.v6.ucPpll = ATOM_EXT_PLL1;
                        break;
                default:
                        DRM_ERROR("Unknown table version %d %d\n", frev, crev);
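
The hunk above reflects a DCE6 quirk: the SI parts (Tahiti, Pitcairn, Verde, Oland) drive the display engine clock from PPLL0, while later asics use the dedicated external PLL, so the v6 table path has to pick the source by asic type. If more call sites needed the same distinction, the check could be factored into a predicate; the helper below is an assumption for illustration, not part of the patch, though its chip list comes straight from the hunk:

    /* hypothetical predicate, not in the patch */
    static bool dce_v6_dispclk_from_ppll0(struct amdgpu_device *adev)
    {
            switch (adev->asic_type) {
            case CHIP_TAHITI:
            case CHIP_PITCAIRN:
            case CHIP_VERDE:
            case CHIP_OLAND:
                    return true;
            default:
                    return false;
            }
    }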
index bc56c8a..b374653 100644 (file)
@@ -27,6 +27,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
 
 #define TARGET_HW_I2C_CLOCK 50
 
index a0d63a2..1d8c375 100644 (file)
@@ -5396,7 +5396,7 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
                       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
 
-       ci_dpm_powergate_uvd(adev, false);
+       ci_dpm_powergate_uvd(adev, true);
 
        if (!amdgpu_ci_is_smc_running(adev))
                return;
@@ -6036,7 +6036,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 
        pi->caps_dynamic_ac_timing = true;
 
-       pi->uvd_power_gated = false;
+       pi->uvd_power_gated = true;
 
        /* make sure dc limits are valid */
        if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
@@ -6179,8 +6179,6 @@ static int ci_dpm_late_init(void *handle)
        if (ret)
                return ret;
 
-       ci_dpm_powergate_uvd(adev, true);
-
        return 0;
 }
 
index e71cd12..e6d7bf9 100644 (file)
@@ -847,6 +847,22 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
 
+static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 4; /* cik_sdma_ring_emit_ib */
+}
+
+static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* cik_sdma_ring_emit_hdp_flush */
+               3 + /* cik_sdma_ring_emit_hdp_invalidate */
+               6 + /* cik_sdma_ring_emit_pipeline_sync */
+               12 + /* cik_sdma_ring_emit_vm_flush */
+               9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
                                 bool enable)
 {
@@ -1220,6 +1236,8 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
        .test_ib = cik_sdma_ring_test_ib,
        .insert_nop = cik_sdma_ring_insert_nop,
        .pad_ib = cik_sdma_ring_pad_ib,
+       .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
+       .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
index 794c5f3..f80a083 100644 (file)
@@ -44,6 +44,7 @@
 
 static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
 static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_fini(struct amdgpu_device *adev);
 
 static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
 {
@@ -350,6 +351,8 @@ static int cz_parse_power_table(struct amdgpu_device *adev)
 
                ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
                if (ps == NULL) {
+                       for (j = 0; j < i; j++)
+                               kfree(adev->pm.dpm.ps[j].ps_priv);
                        kfree(adev->pm.dpm.ps);
                        return -ENOMEM;
                }
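
The two added lines close a leak in the power-table parser: when the allocation for state i fails, the ps_priv pointers already stored in entries 0..i-1 must be freed before the array itself, or they are orphaned. The shape of the fix as a stand-alone sketch:

    /* sketch of partial unwind on a mid-loop allocation failure */
    #include <stdlib.h>
    #include <errno.h>

    struct state { void *priv; };

    static int parse_states(struct state **out, int n)
    {
            struct state *arr = calloc(n, sizeof(*arr));
            int i, j;

            if (!arr)
                    return -ENOMEM;

            for (i = 0; i < n; i++) {
                    arr[i].priv = malloc(64);
                    if (!arr[i].priv) {
                            for (j = 0; j < i; j++)  /* free what succeeded */
                                    free(arr[j].priv);
                            free(arr);
                            return -ENOMEM;
                    }
            }
            *out = arr;
            return 0;
    }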
@@ -409,11 +412,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
 
        ret = amdgpu_get_platform_caps(adev);
        if (ret)
-               return ret;
+               goto err;
 
        ret = amdgpu_parse_extended_power_table(adev);
        if (ret)
-               return ret;
+               goto err;
 
        pi->sram_end = SMC_RAM_END;
 
@@ -467,23 +470,26 @@ static int cz_dpm_init(struct amdgpu_device *adev)
 
        ret = cz_parse_sys_info_table(adev);
        if (ret)
-               return ret;
+               goto err;
 
        cz_patch_voltage_values(adev);
        cz_construct_boot_state(adev);
 
        ret = cz_parse_power_table(adev);
        if (ret)
-               return ret;
+               goto err;
 
        ret = cz_process_firmware_header(adev);
        if (ret)
-               return ret;
+               goto err;
 
        pi->dpm_enabled = true;
        pi->uvd_dynamic_pg = false;
 
        return 0;
+err:
+       cz_dpm_fini(adev);
+       return ret;
 }
 
 static void cz_dpm_fini(struct amdgpu_device *adev)
@@ -672,17 +678,12 @@ static void cz_reset_ap_mask(struct amdgpu_device *adev)
        struct cz_power_info *pi = cz_get_pi(adev);
 
        pi->active_process_mask = 0;
-
 }
 
 static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
                                                        void **table)
 {
-       int ret = 0;
-
-       ret = cz_smu_download_pptable(adev, table);
-
-       return ret;
+       return cz_smu_download_pptable(adev, table);
 }
 
 static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
@@ -822,9 +823,9 @@ static void cz_init_sclk_limit(struct amdgpu_device *adev)
        pi->sclk_dpm.hard_min_clk = 0;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].clk;
-       else {
+       } else {
                DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].clk;
        }
@@ -850,9 +851,9 @@ static void cz_init_uvd_limit(struct amdgpu_device *adev)
        pi->uvd_dpm.hard_min_clk = 0;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].vclk;
-       else {
+       } else {
                DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].vclk;
        }
@@ -878,9 +879,9 @@ static void cz_init_vce_limit(struct amdgpu_device *adev)
        pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].ecclk;
-       else {
+       } else {
                /* future BIOS would fix this error */
                DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].ecclk;
@@ -907,9 +908,9 @@ static void cz_init_acp_limit(struct amdgpu_device *adev)
        pi->acp_dpm.hard_min_clk = 0;
        cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
        level = cz_get_argument(adev);
-       if (level < table->count)
+       if (level < table->count) {
                clock = table->entries[level].clk;
-       else {
+       } else {
                DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
                clock = table->entries[table->count - 1].clk;
        }
@@ -934,7 +935,6 @@ static void cz_init_sclk_threshold(struct amdgpu_device *adev)
        struct cz_power_info *pi = cz_get_pi(adev);
 
        pi->low_sclk_interrupt_threshold = 0;
-
 }
 
 static void cz_dpm_setup_asic(struct amdgpu_device *adev)
@@ -1207,7 +1207,7 @@ static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
        int ret;
 
        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
-                       pi->caps_td_ramping || pi->caps_tcp_ramping) {
+           pi->caps_td_ramping || pi->caps_tcp_ramping) {
                if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
                        ret = cz_disable_cgpg(adev);
                        if (ret) {
@@ -1281,7 +1281,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
        ps->force_high = false;
        ps->need_dfs_bypass = true;
        pi->video_start = new_rps->dclk || new_rps->vclk ||
-                               new_rps->evclk || new_rps->ecclk;
+                         new_rps->evclk || new_rps->ecclk;
 
        if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
                        ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -1339,7 +1339,6 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
        }
 
        cz_reset_acp_boot_level(adev);
-
        cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
 
        return 0;
@@ -1669,7 +1668,6 @@ static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
        struct amdgpu_ps *ps = &pi->requested_rps;
 
        cz_update_current_ps(adev, ps);
-
 }
 
 static int cz_dpm_force_highest(struct amdgpu_device *adev)
@@ -2201,7 +2199,6 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev)
        /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
        if (pi->caps_stable_power_state) {
                pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
-
        } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
                /* leave it as set by user */
                /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
index ac7fee7..95887e4 100644 (file)
@@ -29,6 +29,8 @@
 #include "cz_smumgr.h"
 #include "smu_ucode_xfer_cz.h"
 #include "amdgpu_ucode.h"
+#include "cz_dpm.h"
+#include "vi_dpm.h"
 
 #include "smu/smu_8_0_d.h"
 #include "smu/smu_8_0_sh_mask.h"
@@ -48,7 +50,7 @@ static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
        return priv;
 }
 
-int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
+static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
 {
        int i;
        u32 content = 0, tmp;
@@ -140,7 +142,7 @@ int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
        return 0;
 }
 
-int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
                                                u32 value, u32 limit)
 {
        int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
new file mode 100644 (file)
index 0000000..d3512f3
--- /dev/null
@@ -0,0 +1,3160 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#include "si/si_reg.h"
+#include "si/sid.h"
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[6] =
+{
+       SI_CRTC0_REGISTER_OFFSET,
+       SI_CRTC1_REGISTER_OFFSET,
+       SI_CRTC2_REGISTER_OFFSET,
+       SI_CRTC3_REGISTER_OFFSET,
+       SI_CRTC4_REGISTER_OFFSET,
+       SI_CRTC5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+       SI_CRTC0_REGISTER_OFFSET,
+       SI_CRTC1_REGISTER_OFFSET,
+       SI_CRTC2_REGISTER_OFFSET,
+       SI_CRTC3_REGISTER_OFFSET,
+       SI_CRTC4_REGISTER_OFFSET,
+       SI_CRTC5_REGISTER_OFFSET,
+       (0x13830 - 0x7030) >> 2,
+};
+
+static const struct {
+       uint32_t        reg;
+       uint32_t        vblank;
+       uint32_t        vline;
+       uint32_t        hpd;
+} interrupt_status_offsets[6] = { {
+       .reg = DISP_INTERRUPT_STATUS,
+       .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+       .reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+       .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+       .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+       .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const uint32_t hpd_int_control_offsets[6] = {
+       DC_HPD1_INT_CONTROL,
+       DC_HPD2_INT_CONTROL,
+       DC_HPD3_INT_CONTROL,
+       DC_HPD4_INT_CONTROL,
+       DC_HPD5_INT_CONTROL,
+       DC_HPD6_INT_CONTROL,
+};
+
+static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
+                                    u32 block_offset, u32 reg)
+{
+       DRM_INFO("dce_v6_0_audio_endpt_rreg: not implemented\n");
+       return 0;
+}
+
+static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
+                                     u32 block_offset, u32 reg, u32 v)
+{
+       DRM_INFO("dce_v6_0_audio_endpt_wreg: not implemented\n");
+}
+
+static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+       if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+               return true;
+       else
+               return false;
+}
+
+static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+       u32 pos1, pos2;
+
+       pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+       pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       if (pos1 != pos2)
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v6_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+       unsigned i = 0;
+
+       if (crtc >= adev->mode_info.num_crtc)
+               return;
+
+       if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+               return;
+
+       /* depending on when we hit vblank, we may be close to active; if so,
+        * wait for another frame.
+        */
+       while (dce_v6_0_is_in_vblank(adev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce_v6_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+
+       while (!dce_v6_0_is_in_vblank(adev, crtc)) {
+               if (i++ % 100 == 0) {
+                       if (!dce_v6_0_is_counter_moving(adev, crtc))
+                               break;
+               }
+       }
+}
+
+static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+       if (crtc >= adev->mode_info.num_crtc)
+               return 0;
+       else
+               return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Enable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       /* Disable pflip interrupts */
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
+/**
+ * dce_v6_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
+static void dce_v6_0_page_flip(struct amdgpu_device *adev,
+                              int crtc_id, u64 crtc_base, bool async)
+{
+       struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+       /* flip at hsync for async, default is vsync */
+       WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+              EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+       /* update the scanout addresses */
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(crtc_base));
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32)crtc_base);
+
+       /* post the write */
+       RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+}
+
+static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+                                       u32 *vbl, u32 *position)
+{
+       if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+               return -EINVAL;
+       *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+       *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+       return 0;
+}
+
+/**
+ * dce_v6_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
+                              enum amdgpu_hpd_id hpd)
+{
+       bool connected = false;
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_2:
+               if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_3:
+               if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_4:
+               if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_5:
+               if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       case AMDGPU_HPD_6:
+               if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+                       connected = true;
+               break;
+       default:
+               break;
+       }
+
+       return connected;
+}
+
+/**
+ * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
+                                     enum amdgpu_hpd_id hpd)
+{
+       u32 tmp;
+       bool connected = dce_v6_0_hpd_sense(adev, hpd);
+
+       switch (hpd) {
+       case AMDGPU_HPD_1:
+               tmp = RREG32(DC_HPD1_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD1_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_2:
+               tmp = RREG32(DC_HPD2_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD2_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_3:
+               tmp = RREG32(DC_HPD3_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD3_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_4:
+               tmp = RREG32(DC_HPD4_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD4_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_5:
+               tmp = RREG32(DC_HPD5_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD5_INT_CONTROL, tmp);
+               break;
+       case AMDGPU_HPD_6:
+               tmp = RREG32(DC_HPD6_INT_CONTROL);
+               if (connected)
+                       tmp &= ~DC_HPDx_INT_POLARITY;
+               else
+                       tmp |= DC_HPDx_INT_POLARITY;
+               WREG32(DC_HPD6_INT_CONTROL, tmp);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * dce_v6_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+       u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+               DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+                   connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+                       /* don't try to enable hpd on eDP or LVDS avoid breaking the
+                        * aux dp channel on imac and help (but not completely fix)
+                        * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+                        * also avoid interrupt storms during dpms.
+                        */
+                       continue;
+               }
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       WREG32(DC_HPD1_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_2:
+                       WREG32(DC_HPD2_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_3:
+                       WREG32(DC_HPD3_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_4:
+                       WREG32(DC_HPD4_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_5:
+                       WREG32(DC_HPD5_CONTROL, tmp);
+                       break;
+               case AMDGPU_HPD_6:
+                       WREG32(DC_HPD6_CONTROL, tmp);
+                       break;
+               default:
+                       break;
+               }
+               dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+               amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+       }
+}
+
+/**
+ * dce_v6_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               switch (amdgpu_connector->hpd.hpd) {
+               case AMDGPU_HPD_1:
+                       WREG32(DC_HPD1_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_2:
+                       WREG32(DC_HPD2_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_3:
+                       WREG32(DC_HPD3_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_4:
+                       WREG32(DC_HPD4_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_5:
+                       WREG32(DC_HPD5_CONTROL, 0);
+                       break;
+               case AMDGPU_HPD_6:
+                       WREG32(DC_HPD6_CONTROL, 0);
+                       break;
+               default:
+                       break;
+               }
+               amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+       }
+}
+
+static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+       return SI_DC_GPIO_HPD_A;
+}
+
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+       DRM_INFO("dce_v6_0_is_display_hung: not implemented\n");
+
+       return true;
+}
+
+static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
+{
+       if (crtc >= adev->mode_info.num_crtc)
+               return 0;
+       else
+               return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
+                                   struct amdgpu_mode_mc_save *save)
+{
+       u32 crtc_enabled, tmp, frame_count;
+       int i, j;
+
+       save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+       save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+       /* disable VGA render */
+       WREG32(VGA_RENDER_CONTROL, 0);
+
+       /* blank the display controllers */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+               if (crtc_enabled) {
+                       save->crtc_enabled[i] = true;
+                       tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+
+                       if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+                               dce_v6_0_vblank_wait(adev, i);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                               WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       }
+                       /* wait for the next frame */
+                       frame_count = evergreen_get_vblank_counter(adev, i);
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               if (evergreen_get_vblank_counter(adev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+
+                       /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                       tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+                       tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+                       WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+                       save->crtc_enabled[i] = false;
+               } else {
+                       save->crtc_enabled[i] = false;
+               }
+       }
+}
+
+static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
+                                     struct amdgpu_mode_mc_save *save)
+{
+       u32 tmp;
+       int i, j;
+
+       /* update crtc base addresses */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+                      upper_32_bits(adev->mc.vram_start));
+               WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+               WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)adev->mc.vram_start);
+       }
+
+       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+       WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+
+       /* unlock regs and wait for update */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
+                               WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+                       if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+                               tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+                               WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+                       }
+                       tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+                       if (tmp & 1) {
+                               tmp &= ~1;
+                               WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+                       }
+                       for (j = 0; j < adev->usec_timeout; j++) {
+                               tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+                               if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+
+       /* Unlock vga access */
+       WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+       mdelay(1);
+       WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
+                                         bool render)
+{
+       if (!render)
+               WREG32(R_000300_VGA_RENDER_CONTROL,
+                      RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
+{
+
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+       int bpc = 0;
+       u32 tmp = 0;
+       enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+               bpc = amdgpu_connector_get_monitor_bpc(connector);
+               dither = amdgpu_connector->dither;
+       }
+
+       /* LVDS FMT is set up by atom */
+       if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+               return;
+
+       if (bpc == 0)
+               return;
+
+       switch (bpc) {
+       case 6:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE)
+                       /* XXX sort out optimal dither settings */
+                       tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                               FMT_SPATIAL_DITHER_EN);
+               else
+                       tmp |= FMT_TRUNCATE_EN;
+               break;
+       case 8:
+               if (dither == AMDGPU_FMT_DITHER_ENABLE)
+                       /* XXX sort out optimal dither settings */
+                       tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+                               FMT_RGB_RANDOM_ENABLE |
+                               FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+               else
+                       tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+               break;
+       case 10:
+       default:
+               /* not needed */
+               break;
+       }
+
+       WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+/**
+ * si_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+       switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
+       case 0:
+       default:
+               return 1;
+       case 1:
+               return 2;
+       case 2:
+               return 4;
+       case 3:
+               return 8;
+       case 4:
+               return 3;
+       case 5:
+               return 6;
+       case 6:
+               return 10;
+       case 7:
+               return 12;
+       case 8:
+               return 16;
+       }
+}
+
+struct dce6_wm_params {
+       u32 dram_channels; /* number of dram channels */
+       u32 yclk;          /* bandwidth per dram data pin in kHz */
+       u32 sclk;          /* engine clock in kHz */
+       u32 disp_clk;      /* display clock in kHz */
+       u32 src_width;     /* viewport width */
+       u32 active_time;   /* active display time in ns */
+       u32 blank_time;    /* blank time in ns */
+       bool interlaced;    /* mode is interlaced */
+       fixed20_12 vsc;    /* vertical scale ratio */
+       u32 num_heads;     /* number of active crtcs */
+       u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+       u32 lb_size;       /* line buffer allocated to pipe */
+       u32 vtaps;         /* vertical scaler taps */
+};
+
+/**
+ * dce_v6_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate raw DRAM Bandwidth */
+       fixed20_12 dram_efficiency; /* 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       dram_efficiency.full = dfixed_const(7);
+       dram_efficiency.full = dfixed_div(dram_efficiency, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
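+/*
+ * These watermark helpers all work in fixed20_12, DRM's 20.12 fixed-point
+ * format: dfixed_const(x) is x << 12, dfixed_trunc(x) is x >> 12, and
+ * dfixed_mul()/dfixed_div() preserve the twelve fractional bits.  A worked
+ * example for the function above, with illustrative numbers (4 dram
+ * channels, yclk = 1000000 kHz):
+ *
+ *   yclk       = 1000000 / 1000  = 1000 MHz
+ *   channels   = 4 * 4           = 16 bytes per clock
+ *   efficiency = 7 / 10          = 0.7
+ *   bandwidth  = 16 * 1000 * 0.7 = 11200 MBytes/s
+ */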
+
+/**
+ * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+       /* Calculate DRAM Bandwidth and the part allocated to display. */
+       fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+       fixed20_12 yclk, dram_channels, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       yclk.full = dfixed_const(wm->yclk);
+       yclk.full = dfixed_div(yclk, a);
+       dram_channels.full = dfixed_const(wm->dram_channels * 4);
+       a.full = dfixed_const(10);
+       disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
+       disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+       bandwidth.full = dfixed_mul(dram_channels, yclk);
+       bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the display Data return Bandwidth */
+       fixed20_12 return_efficiency; /* 0.8 */
+       fixed20_12 sclk, bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       sclk.full = dfixed_const(wm->sclk);
+       sclk.full = dfixed_div(sclk, a);
+       a.full = dfixed_const(10);
+       return_efficiency.full = dfixed_const(8);
+       return_efficiency.full = dfixed_div(return_efficiency, a);
+       a.full = dfixed_const(32);
+       bandwidth.full = dfixed_mul(a, sclk);
+       bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the DMIF Request Bandwidth */
+       fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+       fixed20_12 disp_clk, bandwidth;
+       fixed20_12 a, b;
+
+       a.full = dfixed_const(1000);
+       disp_clk.full = dfixed_const(wm->disp_clk);
+       disp_clk.full = dfixed_div(disp_clk, a);
+       a.full = dfixed_const(32);
+       b.full = dfixed_mul(a, disp_clk);
+
+       a.full = dfixed_const(10);
+       disp_clk_request_efficiency.full = dfixed_const(8);
+       disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+       bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
+       u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
+       u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
+       u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
+
+       return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+/**
+ * dce_v6_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
+{
+       /* Calculate the display mode Average Bandwidth
+        * DisplayMode should contain the source and destination dimensions,
+        * timing, etc.
+        */
+       fixed20_12 bpp;
+       fixed20_12 line_time;
+       fixed20_12 src_width;
+       fixed20_12 bandwidth;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1000);
+       line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+       line_time.full = dfixed_div(line_time, a);
+       bpp.full = dfixed_const(wm->bytes_per_pixel);
+       src_width.full = dfixed_const(wm->src_width);
+       bandwidth.full = dfixed_mul(src_width, bpp);
+       bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+       bandwidth.full = dfixed_div(bandwidth, line_time);
+
+       return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
+{
+       /* First calculate the latency in ns */
+       u32 mc_latency = 2000; /* 2000 ns. */
+       u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
+       u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+       u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+       u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+       u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+               (wm->num_heads * cursor_line_pair_return_time);
+       u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+       u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+       u32 tmp, dmif_size = 12288;
+       fixed20_12 a, b, c;
+
+       if (wm->num_heads == 0)
+               return 0;
+
+       a.full = dfixed_const(2);
+       b.full = dfixed_const(1);
+       if ((wm->vsc.full > a.full) ||
+           ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+           (wm->vtaps >= 5) ||
+           ((wm->vsc.full >= a.full) && wm->interlaced))
+               max_src_lines_per_dst_line = 4;
+       else
+               max_src_lines_per_dst_line = 2;
+
+       a.full = dfixed_const(available_bandwidth);
+       b.full = dfixed_const(wm->num_heads);
+       a.full = dfixed_div(a, b);
+
+       b.full = dfixed_const(mc_latency + 512);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(b, c);
+
+       c.full = dfixed_const(dmif_size);
+       b.full = dfixed_div(c, b);
+
+       tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(wm->disp_clk);
+       b.full = dfixed_div(c, b);
+       c.full = dfixed_const(wm->bytes_per_pixel);
+       b.full = dfixed_mul(b, c);
+
+       lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+       a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+       b.full = dfixed_const(1000);
+       c.full = dfixed_const(lb_fill_bw);
+       b.full = dfixed_div(c, b);
+       a.full = dfixed_div(a, b);
+       line_fill_time = dfixed_trunc(a);
+
+       if (line_fill_time < wm->active_time)
+               return latency;
+       else
+               return latency + (line_fill_time - wm->active_time);
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+       if (dce_v6_0_average_bandwidth(wm) <=
+           (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+       if (dce_v6_0_average_bandwidth(wm) <=
+           (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v6_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (CIK).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
+{
+       u32 lb_partitions = wm->lb_size / wm->src_width;
+       u32 line_time = wm->active_time + wm->blank_time;
+       u32 latency_tolerant_lines;
+       u32 latency_hiding;
+       fixed20_12 a;
+
+       a.full = dfixed_const(1);
+       if (wm->vsc.full > a.full)
+               latency_tolerant_lines = 1;
+       else {
+               if (lb_partitions <= (wm->vtaps + 1))
+                       latency_tolerant_lines = 1;
+               else
+                       latency_tolerant_lines = 2;
+       }
+
+       latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+       if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
+               return true;
+       else
+               return false;
+}
+
+/**
+ * dce_v6_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (CIK).
+ */
+static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
+                                       struct amdgpu_crtc *amdgpu_crtc,
+                                       u32 lb_size, u32 num_heads)
+{
+       struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+       struct dce6_wm_params wm_low, wm_high;
+       u32 dram_channels;
+       u32 pixel_period;
+       u32 line_time = 0;
+       u32 latency_watermark_a = 0, latency_watermark_b = 0;
+       u32 priority_a_mark = 0, priority_b_mark = 0;
+       u32 priority_a_cnt = PRIORITY_OFF;
+       u32 priority_b_cnt = PRIORITY_OFF;
+       u32 tmp, arb_control3;
+       fixed20_12 a, b, c;
+
+       if (amdgpu_crtc->base.enabled && num_heads && mode) {
+               pixel_period = 1000000 / (u32)mode->clock;
+               line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+               priority_a_cnt = 0;
+               priority_b_cnt = 0;
+
+               dram_channels = si_get_number_of_dram_channels(adev);
+
+               /* watermark for high clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_high.yclk =
+                               amdgpu_dpm_get_mclk(adev, false) * 10;
+                       wm_high.sclk =
+                               amdgpu_dpm_get_sclk(adev, false) * 10;
+               } else {
+                       wm_high.yclk = adev->pm.current_mclk * 10;
+                       wm_high.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_high.disp_clk = mode->clock;
+               wm_high.src_width = mode->crtc_hdisplay;
+               wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_high.blank_time = line_time - wm_high.active_time;
+               wm_high.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_high.interlaced = true;
+               wm_high.vsc = amdgpu_crtc->vsc;
+               wm_high.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_high.vtaps = 2;
+               wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_high.lb_size = lb_size;
+               wm_high.dram_channels = dram_channels;
+               wm_high.num_heads = num_heads;
+
+               /* watermark for low clocks */
+               if (adev->pm.dpm_enabled) {
+                       wm_low.yclk =
+                               amdgpu_dpm_get_mclk(adev, true) * 10;
+                       wm_low.sclk =
+                               amdgpu_dpm_get_sclk(adev, true) * 10;
+               } else {
+                       wm_low.yclk = adev->pm.current_mclk * 10;
+                       wm_low.sclk = adev->pm.current_sclk * 10;
+               }
+
+               wm_low.disp_clk = mode->clock;
+               wm_low.src_width = mode->crtc_hdisplay;
+               wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+               wm_low.blank_time = line_time - wm_low.active_time;
+               wm_low.interlaced = false;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       wm_low.interlaced = true;
+               wm_low.vsc = amdgpu_crtc->vsc;
+               wm_low.vtaps = 1;
+               if (amdgpu_crtc->rmx_type != RMX_OFF)
+                       wm_low.vtaps = 2;
+               wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+               wm_low.lb_size = lb_size;
+               wm_low.dram_channels = dram_channels;
+               wm_low.num_heads = num_heads;
+
+               /* set for high clocks */
+               latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
+               /* set for low clocks */
+               latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
+
+               /* possibly force display priority to high */
+               /* should really do this at mode validation time... */
+               if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+                   !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+                   !dce_v6_0_check_latency_hiding(&wm_high) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+                       priority_a_cnt |= PRIORITY_ALWAYS_ON;
+                       priority_b_cnt |= PRIORITY_ALWAYS_ON;
+               }
+               if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+                   !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+                   !dce_v6_0_check_latency_hiding(&wm_low) ||
+                   (adev->mode_info.disp_priority == 2)) {
+                       DRM_DEBUG_KMS("force priority to high\n");
+                       priority_a_cnt |= PRIORITY_ALWAYS_ON;
+                       priority_b_cnt |= PRIORITY_ALWAYS_ON;
+               }
+
+               a.full = dfixed_const(1000);
+               b.full = dfixed_const(mode->clock);
+               b.full = dfixed_div(b, a);
+               c.full = dfixed_const(latency_watermark_a);
+               c.full = dfixed_mul(c, b);
+               c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+               c.full = dfixed_div(c, a);
+               a.full = dfixed_const(16);
+               c.full = dfixed_div(c, a);
+               priority_a_mark = dfixed_trunc(c);
+               priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+               a.full = dfixed_const(1000);
+               b.full = dfixed_const(mode->clock);
+               b.full = dfixed_div(b, a);
+               c.full = dfixed_const(latency_watermark_b);
+               c.full = dfixed_mul(c, b);
+               c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+               c.full = dfixed_div(c, a);
+               a.full = dfixed_const(16);
+               c.full = dfixed_div(c, a);
+               priority_b_mark = dfixed_trunc(c);
+               priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+       }
+
+       /* select wm A */
+       arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+       tmp = arb_control3;
+       tmp &= ~LATENCY_WATERMARK_MASK(3);
+       tmp |= LATENCY_WATERMARK_MASK(1);
+       WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+              (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+               LATENCY_HIGH_WATERMARK(line_time)));
+       /* select wm B */
+       tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+       tmp &= ~LATENCY_WATERMARK_MASK(3);
+       tmp |= LATENCY_WATERMARK_MASK(2);
+       WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+       WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+              (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+               LATENCY_HIGH_WATERMARK(line_time)));
+       /* restore original selection */
+       WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+
+       /* write the priority marks */
+       WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+       WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+
+       /* save values for DPM */
+       amdgpu_crtc->line_time = line_time;
+       amdgpu_crtc->wm_high = latency_watermark_a;
+}
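+
+/*
+ * Numerically, the priority mark math above reduces to (a sketch with
+ * assumed numbers, not from the original patch):
+ *
+ *   mark = trunc((mode->clock / 1000) * latency_watermark * hsc / 1000 / 16)
+ *
+ * e.g. for a 148500 kHz pixel clock, a 1000-unit watermark and hsc = 1:
+ *   148.5 * 1000 / 1000 / 16 = 9.28..., so priority_a_mark = 9.
+ */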
+
+/* watermark setup */
+static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
+                                  struct amdgpu_crtc *amdgpu_crtc,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *other_mode)
+{
+       u32 tmp, buffer_alloc, i;
+       u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
+       /*
+        * Line Buffer Setup
+        * There are 3 line buffers, each one shared by 2 display controllers.
+        * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+        * the display controllers.  The partitioning is done via one of four
+        * preset allocations specified in bits 21:20:
+        *  0 - half lb
+        *  2 - whole lb, other crtc must be disabled
+        */
+       /* this can get tricky if we have two large displays on a paired group
+        * of crtcs.  Ideally for multiple large displays we'd assign them to
+        * non-linked crtcs for maximum line buffer allocation.
+        */
+       if (amdgpu_crtc->base.enabled && mode) {
+               if (other_mode) {
+                       tmp = 0; /* 1/2 */
+                       buffer_alloc = 1;
+               } else {
+                       tmp = 2; /* whole */
+                       buffer_alloc = 2;
+               }
+       } else {
+               tmp = 0;
+               buffer_alloc = 0;
+       }
+
+       WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+              DC_LB_MEMORY_CONFIG(tmp));
+
+       WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+              DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+                   DMIF_BUFFERS_ALLOCATED_COMPLETED)
+                       break;
+               udelay(1);
+       }
+
+       if (amdgpu_crtc->base.enabled && mode) {
+               switch (tmp) {
+               case 0:
+               default:
+                       return 4096 * 2;
+               case 2:
+                       return 8192 * 2;
+               }
+       }
+
+       /* controller not enabled, so no lb used */
+       return 0;
+}
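+
+/*
+ * Illustrative outcomes of the allocation above (not from the original
+ * patch): when both crtcs of a pair drive a mode, each gets half a line
+ * buffer and the function returns 4096 * 2 = 8192 pixels; a lone active
+ * crtc gets the whole buffer, 8192 * 2 = 16384; a disabled crtc gets 0.
+ */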
+
+
+/**
+ * dce_v6_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (SI).
+ */
+static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
+{
+       struct drm_display_mode *mode0 = NULL;
+       struct drm_display_mode *mode1 = NULL;
+       u32 num_heads = 0, lb_size;
+       int i;
+
+       if (!adev->mode_info.mode_config_initialized)
+               return;
+
+       amdgpu_update_display_priority(adev);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i]->base.enabled)
+                       num_heads++;
+       }
+       for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
+               mode0 = &adev->mode_info.crtcs[i]->base.mode;
+               mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
+               lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
+               dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
+               lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
+               dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
+       }
+}
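+
+/*
+ * Note on the loop above: crtcs are walked in linked pairs (0,1), (2,3),
+ * (4,5), matching the shared line buffers, so each head's watermark is
+ * programmed against the allocation its partner leaves available.
+ */
+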
+/*
+static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+       int i;
+       u32 offset, tmp;
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               offset = adev->mode_info.audio.pin[i].offset;
+               tmp = RREG32_AUDIO_ENDPT(offset,
+                                     AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+               if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+                       adev->mode_info.audio.pin[i].connected = false;
+               else
+                       adev->mode_info.audio.pin[i].connected = true;
+       }
+
+}
+
+static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
+{
+       int i;
+
+       dce_v6_0_audio_get_connected_pins(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               if (adev->mode_info.audio.pin[i].connected)
+                       return &adev->mode_info.audio.pin[i];
+       }
+       DRM_ERROR("No connected audio pins found!\n");
+       return NULL;
+}
+
+static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+       u32 offset;
+
+       if (!dig || !dig->afmt || !dig->afmt->pin)
+               return;
+
+       offset = dig->afmt->offset;
+
+       WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+              AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+
+}
+
+static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
+                                               struct drm_display_mode *mode)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+
+}
+*/
+static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
+                                 struct amdgpu_audio_pin *pin,
+                                 bool enable)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+}
+
+static const u32 pin_offsets[7] = {
+       (0x1780 - 0x1780),
+       (0x1786 - 0x1780),
+       (0x178c - 0x1780),
+       (0x1792 - 0x1780),
+       (0x1798 - 0x1780),
+       (0x179d - 0x1780),
+       (0x17a4 - 0x1780),
+};
+
+static int dce_v6_0_audio_init(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
+{
+}
+
+/*
+static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+       DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+}
+*/
+/*
+ * build a HDMI Video Info Frame
+ */
+/*
+static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+                                              void *buffer, size_t size)
+{
+       DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+       DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
+}
+*/
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode)
+{
+       DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+}
+
+static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+       if (!dig || !dig->afmt)
+               return;
+
+       /* Silent, r600_hdmi_enable will raise WARN for us */
+       if (enable && dig->afmt->enabled)
+               return;
+       if (!enable && !dig->afmt->enabled)
+               return;
+
+       if (!enable && dig->afmt->pin) {
+               dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+               dig->afmt->pin = NULL;
+       }
+
+       dig->afmt->enabled = enable;
+
+       DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+                 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static void dce_v6_0_afmt_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++)
+               adev->mode_info.afmt[i] = NULL;
+
+       /* DCE6 has audio blocks tied to DIG encoders */
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+               if (adev->mode_info.afmt[i]) {
+                       adev->mode_info.afmt[i]->offset = dig_offsets[i];
+                       adev->mode_info.afmt[i]->id = i;
+               }
+       }
+}
+
+static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->mode_info.num_dig; i++) {
+               kfree(adev->mode_info.afmt[i]);
+               adev->mode_info.afmt[i] = NULL;
+       }
+}
+
+static const u32 vga_control_regs[6] = {
+       AVIVO_D1VGA_CONTROL,
+       AVIVO_D2VGA_CONTROL,
+       EVERGREEN_D3VGA_CONTROL,
+       EVERGREEN_D4VGA_CONTROL,
+       EVERGREEN_D5VGA_CONTROL,
+       EVERGREEN_D6VGA_CONTROL,
+};
+
+static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 vga_control;
+
+       vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+       if (enable)
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
+       else
+               WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
+}
+
+static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       if (enable)
+               WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
+       else
+               WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
+}
+
+static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+                                    struct drm_framebuffer *fb,
+                                    int x, int y, int atomic)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_framebuffer *amdgpu_fb;
+       struct drm_framebuffer *target_fb;
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *rbo;
+       uint64_t fb_location, tiling_flags;
+       uint32_t fb_format, fb_pitch_pixels, pipe_config;
+       u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+       u32 viewport_w, viewport_h;
+       int r;
+       bool bypass_lut = false;
+
+       /* no fb bound */
+       if (!atomic && !crtc->primary->fb) {
+               DRM_DEBUG_KMS("No FB bound\n");
+               return 0;
+       }
+
+       if (atomic) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               target_fb = fb;
+       } else {
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               target_fb = crtc->primary->fb;
+       }
+
+       /* If atomic, assume fb object is pinned & idle & fenced and
+        * just update base pointers
+        */
+       obj = amdgpu_fb->obj;
+       rbo = gem_to_amdgpu_bo(obj);
+       r = amdgpu_bo_reserve(rbo, false);
+       if (unlikely(r != 0))
+               return r;
+
+       if (atomic) {
+               fb_location = amdgpu_bo_gpu_offset(rbo);
+       } else {
+               r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+               if (unlikely(r != 0)) {
+                       amdgpu_bo_unreserve(rbo);
+                       return -EINVAL;
+               }
+       }
+
+       amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+       amdgpu_bo_unreserve(rbo);
+
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+               break;
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_BGRA5551:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_RGB565:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_BGRA1010102:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       default:
+               DRM_ERROR("Unsupported screen format %s\n",
+                         drm_get_format_name(target_fb->pixel_format));
+               return -EINVAL;
+       }
+
+       if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
+               unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+               bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+               bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+               mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+               tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+               num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+               fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+               fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+               fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+               fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+               fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+               fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+       } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1)
+               fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+       pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+       fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+
+       dce_v6_0_vga_enable(crtc, false);
+
+       /* Make sure surface address is updated at vertical blank rather than
+        * horizontal blank
+        */
+       WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(fb_location));
+       WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+       WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+       WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+       WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+
+       /*
+        * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
+        * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+        * retain the full precision throughout the pipeline.
+        */
+       WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
+                (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+                ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+       WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+       WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+       fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+       WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+       dce_v6_0_grph_enable(crtc, true);
+
+       WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+                      target_fb->height);
+       x &= ~3;
+       y &= ~1;
+       WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+              (x << 16) | y);
+       viewport_w = crtc->mode.hdisplay;
+       viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+
+       WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+              (viewport_w << 16) | viewport_h);
+
+       /* set pageflip to happen anywhere in vblank interval */
+       WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+
+       if (!atomic && fb && fb != crtc->primary->fb) {
+               amdgpu_fb = to_amdgpu_framebuffer(fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r != 0))
+                       return r;
+               amdgpu_bo_unpin(rbo);
+               amdgpu_bo_unreserve(rbo);
+       }
+
+       /* Bytes per pixel may have changed */
+       dce_v6_0_bandwidth_update(adev);
+
+       return 0;
+}
+
+static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
+                                   struct drm_display_mode *mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
+                      EVERGREEN_INTERLEAVE_EN);
+       else
+               WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       int i;
+
+       DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+       WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+               NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+       WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+              NI_GRPH_PRESCALE_BYPASS);
+       WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+              NI_OVL_PRESCALE_BYPASS);
+       WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+               NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+       WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+       WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+       WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+       WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+       for (i = 0; i < 256; i++) {
+               WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+                      (amdgpu_crtc->lut_r[i] << 20) |
+                      (amdgpu_crtc->lut_g[i] << 10) |
+                      (amdgpu_crtc->lut_b[i] << 0));
+       }
+
+       WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+               NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+               NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+               NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+       WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+               NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+       WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+               NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+       WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+              (NI_OUTPUT_CSC_GRPH_MODE(0) |
+               NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+       /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+       WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
+}
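+
+/*
+ * Each DC_LUT_30_COLOR word above packs three 10-bit channels as
+ * (r << 20) | (g << 10) | b; gamma_set pre-shifts userspace's 16-bit
+ * values down by 6 to fit.  E.g. (illustrative, not from the original
+ * patch) a mid-gray 0x8000 becomes 0x200 per channel, i.e. 0x20080200.
+ */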
+
+static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+               if (dig->linkb)
+                       return 1;
+               else
+                       return 0;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+               if (dig->linkb)
+                       return 3;
+               else
+                       return 2;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               if (dig->linkb)
+                       return 5;
+               else
+                       return 4;
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               return 6;
+       default:
+               DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+               return 0;
+       }
+}
+
+/**
+ * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ */
+static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       u32 pll_in_use;
+       int pll;
+
+       if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+               if (adev->clock.dp_extclk)
+                       /* skip PPLL programming if using ext clock */
+                       return ATOM_PPLL_INVALID;
+               else
+                       return ATOM_PPLL0;
+       } else {
+               /* use the same PPLL for all monitors with the same clock */
+               pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+               if (pll != ATOM_PPLL_INVALID)
+                       return pll;
+       }
+
+       /* PPLL1 and PPLL2 */
+       pll_in_use = amdgpu_pll_get_use_mask(crtc);
+       if (!(pll_in_use & (1 << ATOM_PPLL2)))
+               return ATOM_PPLL2;
+       if (!(pll_in_use & (1 << ATOM_PPLL1)))
+               return ATOM_PPLL1;
+       DRM_ERROR("unable to allocate a PPLL\n");
+       return ATOM_PPLL_INVALID;
+}
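+
+/*
+ * For example (illustrative, not from the original patch): if another
+ * non-DP crtc already holds ATOM_PPLL2, amdgpu_pll_get_use_mask() returns
+ * a mask with (1 << ATOM_PPLL2) set, so the search above falls through to
+ * ATOM_PPLL1; with both PPLLs taken the crtc gets ATOM_PPLL_INVALID.
+ */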
+
+static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       uint32_t cur_lock;
+
+       cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+       if (lock)
+               cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+       else
+               cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+       WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+
+       WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+                  EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+                  EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+
+       WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
+       WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+                  EVERGREEN_CURSOR_EN |
+                  EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+                  EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
+                                      int x, int y)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct amdgpu_device *adev = crtc->dev->dev_private;
+       int xorigin = 0, yorigin = 0;
+       int w = amdgpu_crtc->cursor_width;
+
+       /* avivo cursors are offset into the total surface */
+       x += crtc->x;
+       y += crtc->y;
+       DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+       if (x < 0) {
+               xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+               x = 0;
+       }
+       if (y < 0) {
+               yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+               y = 0;
+       }
+
+       WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+       WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
+       return 0;
+}
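+
+/*
+ * E.g. (illustrative, not from the original patch): for x = -10 the code
+ * above clamps x to 0 and sets xorigin = 10, so CUR_HOT_SPOT shifts the
+ * hotspot 10 texels into the cursor image and its first 10 columns hang
+ * off the left edge of the surface.
+ */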
+
+static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                    int x, int y)
+{
+       int ret;
+
+       dce_v6_0_lock_cursor(crtc, true);
+       ret = dce_v6_0_cursor_move_locked(crtc, x, y);
+       dce_v6_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                    struct drm_file *file_priv,
+                                    uint32_t handle,
+                                    uint32_t width,
+                                    uint32_t height,
+                                    int32_t hot_x,
+                                    int32_t hot_y)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *aobj;
+       int ret;
+
+       if (!handle) {
+               /* turn off cursor */
+               dce_v6_0_hide_cursor(crtc);
+               obj = NULL;
+               goto unpin;
+       }
+
+       if ((width > amdgpu_crtc->max_cursor_width) ||
+           (height > amdgpu_crtc->max_cursor_height)) {
+               DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+               return -EINVAL;
+       }
+
+       obj = drm_gem_object_lookup(file_priv, handle);
+       if (!obj) {
+               DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+               return -ENOENT;
+       }
+
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       amdgpu_crtc->cursor_width = width;
+       amdgpu_crtc->cursor_height = height;
+
+       dce_v6_0_lock_cursor(crtc, true);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v6_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
+       dce_v6_0_show_cursor(crtc);
+       dce_v6_0_lock_cursor(crtc, false);
+
+unpin:
+       if (amdgpu_crtc->cursor_bo) {
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
+               if (likely(ret == 0)) {
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
+               }
+               drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+       }
+
+       amdgpu_crtc->cursor_bo = obj;
+       return 0;
+}
+
+static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v6_0_lock_cursor(crtc, true);
+
+               dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                           amdgpu_crtc->cursor_y);
+
+               dce_v6_0_show_cursor(crtc);
+
+               dce_v6_0_lock_cursor(crtc, false);
+       }
+}
+
+static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                  u16 *blue, uint32_t size)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       int i;
+
+       /* userspace palettes are always correct as is */
+       for (i = 0; i < size; i++) {
+               amdgpu_crtc->lut_r[i] = red[i] >> 6;
+               amdgpu_crtc->lut_g[i] = green[i] >> 6;
+               amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+       }
+       dce_v6_0_crtc_load_lut(crtc);
+
+       return 0;
+}
+
+static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
+       .cursor_set2 = dce_v6_0_crtc_cursor_set2,
+       .cursor_move = dce_v6_0_crtc_cursor_move,
+       .gamma_set = dce_v6_0_crtc_gamma_set,
+       .set_config = amdgpu_crtc_set_config,
+       .destroy = dce_v6_0_crtc_destroy,
+       .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               amdgpu_crtc->enabled = true;
+               amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+               amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+               /* Make sure VBLANK and PFLIP interrupts are still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
+               amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+               drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+               dce_v6_0_crtc_load_lut(crtc);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+               if (amdgpu_crtc->enabled)
+                       amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+               amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+               amdgpu_crtc->enabled = false;
+               break;
+       }
+       /* adjust pm to dpms */
+       amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
+{
+       /* disable crtc pair power gating before programming */
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+       dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
+{
+       dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_atom_ss ss;
+       int i;
+
+       dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       if (crtc->primary->fb) {
+               int r;
+               struct amdgpu_framebuffer *amdgpu_fb;
+               struct amdgpu_bo *rbo;
+
+               amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+               rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+               r = amdgpu_bo_reserve(rbo, false);
+               if (unlikely(r)) {
+                       DRM_ERROR("failed to reserve rbo before unpin\n");
+               } else {
+                       amdgpu_bo_unpin(rbo);
+                       amdgpu_bo_unreserve(rbo);
+               }
+       }
+       /* disable the GRPH */
+       dce_v6_0_grph_enable(crtc, false);
+
+       amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->mode_info.crtcs[i] &&
+                   adev->mode_info.crtcs[i]->enabled &&
+                   i != amdgpu_crtc->crtc_id &&
+                   amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+                       /* one other crtc is using this pll, don't turn
+                        * off the pll
+                        */
+                       goto done;
+               }
+       }
+
+       switch (amdgpu_crtc->pll_id) {
+       case ATOM_PPLL1:
+       case ATOM_PPLL2:
+               /* disable the ppll */
+               amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+                                                0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+               break;
+       default:
+               break;
+       }
+done:
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode,
+                                 int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (!amdgpu_crtc->adjusted_clock)
+               return -EINVAL;
+
+       amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+       amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+       dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+       amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+       amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v6_0_cursor_reset(crtc);
+       /* update the hw version for dpm */
+       amdgpu_crtc->hw_mode = *adjusted_mode;
+
+       return 0;
+}
+
+static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
+                                    const struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *encoder;
+
+       /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc == crtc) {
+                       amdgpu_crtc->encoder = encoder;
+                       amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+                       break;
+               }
+       }
+       if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+               amdgpu_crtc->encoder = NULL;
+               amdgpu_crtc->connector = NULL;
+               return false;
+       }
+       if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+               return false;
+       if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+               return false;
+       /* pick pll */
+       amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
+       /* if we can't get a PPLL for a non-DP encoder, fail */
+       if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+           !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+               return false;
+
+       return true;
+}
+
+static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+                                 struct drm_framebuffer *old_fb)
+{
+       return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+                                        struct drm_framebuffer *fb,
+                                        int x, int y, enum mode_set_atomic state)
+{
+       return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
+       .dpms = dce_v6_0_crtc_dpms,
+       .mode_fixup = dce_v6_0_crtc_mode_fixup,
+       .mode_set = dce_v6_0_crtc_mode_set,
+       .mode_set_base = dce_v6_0_crtc_set_base,
+       .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
+       .prepare = dce_v6_0_crtc_prepare,
+       .commit = dce_v6_0_crtc_commit,
+       .load_lut = dce_v6_0_crtc_load_lut,
+       .disable = dce_v6_0_crtc_disable,
+};
+
+static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+       struct amdgpu_crtc *amdgpu_crtc;
+       int i;
+
+       amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+                             (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       if (amdgpu_crtc == NULL)
+               return -ENOMEM;
+
+       drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+       amdgpu_crtc->crtc_id = index;
+       adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+       amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
+       amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
+       adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+       adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
+       for (i = 0; i < 256; i++) {
+               amdgpu_crtc->lut_r[i] = i << 2;
+               amdgpu_crtc->lut_g[i] = i << 2;
+               amdgpu_crtc->lut_b[i] = i << 2;
+       }
+
+       amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
+
+       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+       amdgpu_crtc->adjusted_clock = 0;
+       amdgpu_crtc->encoder = NULL;
+       amdgpu_crtc->connector = NULL;
+       drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+
+       return 0;
+}
+
+static int dce_v6_0_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
+       adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
+
+       dce_v6_0_set_display_funcs(adev);
+       dce_v6_0_set_irq_funcs(adev);
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_VERDE:
+               adev->mode_info.num_crtc = 6;
+               adev->mode_info.num_hpd = 6;
+               adev->mode_info.num_dig = 6;
+               break;
+       case CHIP_OLAND:
+               adev->mode_info.num_crtc = 2;
+               adev->mode_info.num_hpd = 2;
+               adev->mode_info.num_dig = 2;
+               break;
+       default:
+               /* FIXME: not supported yet */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_sw_init(void *handle)
+{
+       int r, i;
+       bool ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+               if (r)
+                       return r;
+       }
+
+       for (i = 8; i < 20; i += 2) {
+               r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+               if (r)
+                       return r;
+       }
+
+       /* HPD hotplug */
+       r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+       if (r)
+               return r;
+
+       adev->mode_info.mode_config_initialized = true;
+
+       adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+       adev->ddev->mode_config.async_page_flip = true;
+
+       adev->ddev->mode_config.max_width = 16384;
+       adev->ddev->mode_config.max_height = 16384;
+
+       adev->ddev->mode_config.preferred_depth = 24;
+       adev->ddev->mode_config.prefer_shadow = 1;
+
+       adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+       r = amdgpu_modeset_create_props(adev);
+       if (r)
+               return r;
+
+       /* allocate crtcs */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = dce_v6_0_crtc_init(adev, i);
+               if (r)
+                       return r;
+       }
+
+       ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
+       if (ret)
+               amdgpu_print_display_setup(adev->ddev);
+       else
+               return -EINVAL;
+
+       /* setup afmt */
+       dce_v6_0_afmt_init(adev);
+
+       r = dce_v6_0_audio_init(adev);
+       if (r)
+               return r;
+
+       drm_kms_helper_poll_init(adev->ddev);
+
+       return r;
+}
+
+static int dce_v6_0_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       kfree(adev->mode_info.bios_hardcoded_edid);
+
+       drm_kms_helper_poll_fini(adev->ddev);
+
+       dce_v6_0_audio_fini(adev);
+
+       dce_v6_0_afmt_fini(adev);
+
+       drm_mode_config_cleanup(adev->ddev);
+       adev->mode_info.mode_config_initialized = false;
+
+       return 0;
+}
+
+static int dce_v6_0_hw_init(void *handle)
+{
+       int i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       /* init dig PHYs, disp eng pll */
+       amdgpu_atombios_encoder_init_dig(adev);
+       amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+       /* initialize hpd */
+       dce_v6_0_hpd_init(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       dce_v6_0_pageflip_interrupt_init(adev);
+
+       return 0;
+}
+
+static int dce_v6_0_hw_fini(void *handle)
+{
+       int i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       dce_v6_0_hpd_fini(adev);
+
+       for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+               dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+       }
+
+       dce_v6_0_pageflip_interrupt_fini(adev);
+
+       return 0;
+}
+
+static int dce_v6_0_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_atombios_scratch_regs_save(adev);
+
+       return dce_v6_0_hw_fini(handle);
+}
+
+static int dce_v6_0_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int ret;
+
+       ret = dce_v6_0_hw_init(handle);
+
+       amdgpu_atombios_scratch_regs_restore(adev);
+
+       /* turn on the BL */
+       if (adev->mode_info.bl_encoder) {
+               u8 bl_level = amdgpu_display_backlight_get_level(adev,
+                                                                 adev->mode_info.bl_encoder);
+               amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+                                                   bl_level);
+       }
+
+       return ret;
+}
+
+static bool dce_v6_0_is_idle(void *handle)
+{
+       return true;
+}
+
+static int dce_v6_0_wait_for_idle(void *handle)
+{
+       return 0;
+}
+
+static int dce_v6_0_soft_reset(void *handle)
+{
+       DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+       return 0;
+}
+
+static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+                                                    int crtc,
+                                                    enum amdgpu_interrupt_state state)
+{
+       u32 reg_block, interrupt_mask;
+
+       if (crtc >= adev->mode_info.num_crtc) {
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (crtc) {
+       case 0:
+               reg_block = SI_CRTC0_REGISTER_OFFSET;
+               break;
+       case 1:
+               reg_block = SI_CRTC1_REGISTER_OFFSET;
+               break;
+       case 2:
+               reg_block = SI_CRTC2_REGISTER_OFFSET;
+               break;
+       case 3:
+               reg_block = SI_CRTC3_REGISTER_OFFSET;
+               break;
+       case 4:
+               reg_block = SI_CRTC4_REGISTER_OFFSET;
+               break;
+       case 5:
+               reg_block = SI_CRTC5_REGISTER_OFFSET;
+               break;
+       default:
+               DRM_DEBUG("invalid crtc %d\n", crtc);
+               return;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               interrupt_mask = RREG32(INT_MASK + reg_block);
+               interrupt_mask &= ~VBLANK_INT_MASK;
+               WREG32(INT_MASK + reg_block, interrupt_mask);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               interrupt_mask = RREG32(INT_MASK + reg_block);
+               interrupt_mask |= VBLANK_INT_MASK;
+               WREG32(INT_MASK + reg_block, interrupt_mask);
+               break;
+       default:
+               break;
+       }
+}
+
+static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+                                                   int crtc,
+                                                   enum amdgpu_interrupt_state state)
+{
+
+}
+
+static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+       switch (type) {
+       case AMDGPU_HPD_1:
+               dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_2:
+               dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_3:
+               dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_4:
+               dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_5:
+               dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+               break;
+       case AMDGPU_HPD_6:
+               dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+               break;
+       default:
+               DRM_DEBUG("invalid hdp %d\n", type);
+               return 0;
+       }
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+               dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+               WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+               dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+               WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *src,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CRTC_IRQ_VBLANK1:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK2:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK3:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK4:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK5:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VBLANK6:
+               dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE1:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE2:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE3:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE4:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE5:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
+               break;
+       case AMDGPU_CRTC_IRQ_VLINE6:
+               dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
+                            struct amdgpu_irq_src *source,
+                            struct amdgpu_iv_entry *entry)
+{
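+       /* crtc IV src_ids are 1-based (D1..D6), so the crtc index is src_id - 1 */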
+       unsigned crtc = entry->src_id - 1;
+       uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+       unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+       switch (entry->src_data) {
+       case 0: /* vblank */
+               if (disp_int & interrupt_status_offsets[crtc].vblank)
+                       WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               if (amdgpu_irq_enabled(adev, source, irq_type))
+                       drm_handle_vblank(adev->ddev, crtc);
+               DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+               break;
+       case 1: /* vline */
+               if (disp_int & interrupt_status_offsets[crtc].vline)
+                       WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+               else
+                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+               DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+               break;
+       default:
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               break;
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+                                                struct amdgpu_irq_src *src,
+                                                unsigned type,
+                                                enum amdgpu_interrupt_state state)
+{
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
+       }
+
+       reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+       if (state == AMDGPU_IRQ_STATE_DISABLE)
+               WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+       else
+               WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+       return 0;
+}
+
+static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
+                                struct amdgpu_irq_src *source,
+                                struct amdgpu_iv_entry *entry)
+{
+       unsigned long flags;
+       unsigned crtc_id;
+       struct amdgpu_crtc *amdgpu_crtc;
+       struct amdgpu_flip_work *works;
+
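+       /* pageflip IV entries carry two src_ids per crtc starting at 8 */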
+       crtc_id = (entry->src_id - 8) >> 1;
+
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
+       }
+
+       amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+       if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+       /* the IRQ can fire during early init, before the crtc is set up */
+       if (amdgpu_crtc == NULL)
+               return 0;
+
+       spin_lock_irqsave(&adev->ddev->event_lock, flags);
+       works = amdgpu_crtc->pflip_works;
+       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+                                               "AMDGPU_FLIP_SUBMITTED(%d)\n",
+                                               amdgpu_crtc->pflip_status,
+                                               AMDGPU_FLIP_SUBMITTED);
+               spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+               return 0;
+       }
+
+       /* page flip completed. clean up */
+       amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+       amdgpu_crtc->pflip_works = NULL;
+
+       /* wake up userspace */
+       if (works->event)
+               drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+       drm_crtc_vblank_put(&amdgpu_crtc->base);
+       schedule_work(&works->unpin_work);
+
+       return 0;
+}
+
+static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
+                           struct amdgpu_irq_src *source,
+                           struct amdgpu_iv_entry *entry)
+{
+       uint32_t disp_int, mask, int_control, tmp;
+       unsigned hpd;
+
+       if (entry->src_data >= 6) {
+               DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+               return 0;
+       }
+
+       hpd = entry->src_data;
+       disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+       mask = interrupt_status_offsets[hpd].hpd;
+       int_control = hpd_int_control_offsets[hpd];
+
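+       /* ack the asserted HPD interrupt and kick the hotplug worker */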
+       if (disp_int & mask) {
+               tmp = RREG32(int_control);
+               tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+               WREG32(int_control, tmp);
+               schedule_work(&adev->hotplug_work);
+               DRM_INFO("IH: HPD%d\n", hpd + 1);
+       }
+
+       return 0;
+}
+
+static int dce_v6_0_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int dce_v6_0_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+       .name = "dce_v6_0",
+       .early_init = dce_v6_0_early_init,
+       .late_init = NULL,
+       .sw_init = dce_v6_0_sw_init,
+       .sw_fini = dce_v6_0_sw_fini,
+       .hw_init = dce_v6_0_hw_init,
+       .hw_fini = dce_v6_0_hw_fini,
+       .suspend = dce_v6_0_suspend,
+       .resume = dce_v6_0_resume,
+       .is_idle = dce_v6_0_is_idle,
+       .wait_for_idle = dce_v6_0_wait_for_idle,
+       .soft_reset = dce_v6_0_soft_reset,
+       .set_clockgating_state = dce_v6_0_set_clockgating_state,
+       .set_powergating_state = dce_v6_0_set_powergating_state,
+};
+
+static void
+dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+                         struct drm_display_mode *mode,
+                         struct drm_display_mode *adjusted_mode)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+       /* need to call this here rather than in prepare() since we need some crtc info */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       /* setting the scaler clears this on some chips */
+       dce_v6_0_set_interleave(encoder->crtc, mode);
+
+       if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+               dce_v6_0_afmt_enable(encoder, true);
+               dce_v6_0_afmt_setmode(encoder, adjusted_mode);
+       }
+}
+
+static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
+{
+       struct amdgpu_device *adev = encoder->dev->dev_private;
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+       if ((amdgpu_encoder->active_device &
+            (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+           (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+            ENCODER_OBJECT_ID_NONE)) {
+               struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+               if (dig) {
+                       dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
+                       if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+                               dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+               }
+       }
+
+       amdgpu_atombios_scratch_regs_lock(adev, true);
+
+       if (connector) {
+               struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+               /* select the clock/data port if it uses a router */
+               if (amdgpu_connector->router.cd_valid)
+                       amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+               /* turn eDP panel on for mode set */
+               if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+                       amdgpu_atombios_encoder_set_edp_panel_power(connector,
+                                                            ATOM_TRANSMITTER_ACTION_POWER_ON);
+       }
+
+       /* this is needed for the pll/ss setup to work correctly in some cases */
+       amdgpu_atombios_encoder_set_crtc_source(encoder);
+       /* set up the FMT blocks */
+       dce_v6_0_program_fmt(encoder);
+}
+
+static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+
+       /* need to call this here as we need the crtc set up */
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+       amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       struct amdgpu_encoder_atom_dig *dig;
+
+       amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+       if (amdgpu_atombios_encoder_is_digital(encoder)) {
+               if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+                       dce_v6_0_afmt_enable(encoder, false);
+               dig = amdgpu_encoder->enc_priv;
+               dig->dig_encoder = -1;
+       }
+       amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+                     struct drm_display_mode *mode,
+                     struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
+                                   const struct drm_display_mode *mode,
+                                   struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
+       .dpms = dce_v6_0_ext_dpms,
+       .mode_fixup = dce_v6_0_ext_mode_fixup,
+       .prepare = dce_v6_0_ext_prepare,
+       .mode_set = dce_v6_0_ext_mode_set,
+       .commit = dce_v6_0_ext_commit,
+       .disable = dce_v6_0_ext_disable,
+       /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v6_0_encoder_prepare,
+       .mode_set = dce_v6_0_encoder_mode_set,
+       .commit = dce_v6_0_encoder_commit,
+       .disable = dce_v6_0_encoder_disable,
+       .detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
+       .dpms = amdgpu_atombios_encoder_dpms,
+       .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+       .prepare = dce_v6_0_encoder_prepare,
+       .mode_set = dce_v6_0_encoder_mode_set,
+       .commit = dce_v6_0_encoder_commit,
+       .detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
+{
+       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+       if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+               amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+       kfree(amdgpu_encoder->enc_priv);
+       drm_encoder_cleanup(encoder);
+       kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
+       .destroy = dce_v6_0_encoder_destroy,
+};
+
+static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
+                                uint32_t encoder_enum,
+                                uint32_t supported_device,
+                                u16 caps)
+{
+       struct drm_device *dev = adev->ddev;
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+
+       /* see if we already added it */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               amdgpu_encoder = to_amdgpu_encoder(encoder);
+               if (amdgpu_encoder->encoder_enum == encoder_enum) {
+                       amdgpu_encoder->devices |= supported_device;
+                       return;
+               }
+       }
+
+       /* add a new one */
+       amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+       if (!amdgpu_encoder)
+               return;
+
+       encoder = &amdgpu_encoder->base;
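+       /* build a possible_crtcs mask with one bit per crtc on this asic */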
+       switch (adev->mode_info.num_crtc) {
+       case 1:
+               encoder->possible_crtcs = 0x1;
+               break;
+       case 2:
+       default:
+               encoder->possible_crtcs = 0x3;
+               break;
+       case 4:
+               encoder->possible_crtcs = 0xf;
+               break;
+       case 6:
+               encoder->possible_crtcs = 0x3f;
+               break;
+       }
+
+       amdgpu_encoder->enc_priv = NULL;
+
+       amdgpu_encoder->encoder_enum = encoder_enum;
+       amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+       amdgpu_encoder->devices = supported_device;
+       amdgpu_encoder->rmx_type = RMX_OFF;
+       amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+       amdgpu_encoder->is_ext_encoder = false;
+       amdgpu_encoder->caps = caps;
+
+       switch (amdgpu_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+               drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                DRM_MODE_ENCODER_DAC, NULL);
+               drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+                       amdgpu_encoder->rmx_type = RMX_FULL;
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS, NULL);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+               } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC, NULL);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               } else {
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS, NULL);
+                       amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+               }
+               drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
+               break;
+       case ENCODER_OBJECT_ID_SI170B:
+       case ENCODER_OBJECT_ID_CH7303:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+       case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+       case ENCODER_OBJECT_ID_TITFP513:
+       case ENCODER_OBJECT_ID_VT1623:
+       case ENCODER_OBJECT_ID_HDMI_SI1930:
+       case ENCODER_OBJECT_ID_TRAVIS:
+       case ENCODER_OBJECT_ID_NUTMEG:
+               /* these are handled by the primary encoders */
+               amdgpu_encoder->is_ext_encoder = true;
+               if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_LVDS, NULL);
+               else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_DAC, NULL);
+               else
+                       drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+                                        DRM_MODE_ENCODER_TMDS, NULL);
+               drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
+               break;
+       }
+}
+
+static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
+       .set_vga_render_state = &dce_v6_0_set_vga_render_state,
+       .bandwidth_update = &dce_v6_0_bandwidth_update,
+       .vblank_get_counter = &dce_v6_0_vblank_get_counter,
+       .vblank_wait = &dce_v6_0_vblank_wait,
+       .is_display_hung = &dce_v6_0_is_display_hung,
+       .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+       .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+       .hpd_sense = &dce_v6_0_hpd_sense,
+       .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
+       .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
+       .page_flip = &dce_v6_0_page_flip,
+       .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
+       .add_encoder = &dce_v6_0_encoder_add,
+       .add_connector = &amdgpu_connector_add,
+       .stop_mc_access = &dce_v6_0_stop_mc_access,
+       .resume_mc_access = &dce_v6_0_resume_mc_access,
+};
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mode_info.funcs == NULL)
+               adev->mode_info.funcs = &dce_v6_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
+       .set = dce_v6_0_set_crtc_interrupt_state,
+       .process = dce_v6_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
+       .set = dce_v6_0_set_pageflip_interrupt_state,
+       .process = dce_v6_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
+       .set = dce_v6_0_set_hpd_interrupt_state,
+       .process = dce_v6_0_hpd_irq,
+};
+
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+       adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
+
+       adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+       adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
+
+       adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+       adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
new file mode 100644 (file)
index 0000000..6a55281
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DCE_V6_0_H__
+#define __DCE_V6_0_H__
+
+extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 00663a7..619b604 100644 (file)
@@ -466,11 +466,7 @@ static int dce_virtual_suspend(void *handle)
 
 static int dce_virtual_resume(void *handle)
 {
-       int ret;
-
-       ret = dce_virtual_hw_init(handle);
-
-       return ret;
+       return dce_virtual_hw_init(handle);
 }
 
 static bool dce_virtual_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
new file mode 100644 (file)
index 0000000..410b29c
--- /dev/null
@@ -0,0 +1,3233 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_ucode.h"
+#include "si/clearstate_si.h"
+#include "si/sid.h"
+
+#define GFX6_NUM_GFX_RINGS     1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define STATIC_PER_CU_PG_ENABLE                    (1 << 3)
+#define DYN_PER_CU_PG_ENABLE                       (1 << 2)
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+
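+/*
+ * RLC save/restore descriptors: the upper 16 bits carry a block/instance
+ * select, the lower bits hold the dword register offset (byte offset >> 2);
+ * each descriptor is followed by a scratch slot for the saved value.
+ */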
+static const u32 verde_rlc_save_restore_register_list[] =
+{
+       (0x8000 << 16) | (0x98f4 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x98f4 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0xe80 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0xe80 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x89bc >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x89bc >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x8c1c >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x8c1c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x98f0 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xe7c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9148 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9148 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9150 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x897c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8d8c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac54 >> 2),
+       0x00000000,
+       0x3,
+       (0x9c00 << 16) | (0x98f8 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9910 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9914 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9918 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x991c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9920 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9924 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9928 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x992c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9930 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9934 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9938 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x993c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9940 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9944 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9948 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x994c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9950 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9954 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9958 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x995c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9960 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9964 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9968 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x996c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9970 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9974 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9978 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x997c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9980 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9984 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9988 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x998c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c00 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c14 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c04 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c08 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9b7c >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9b7c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0xe84 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0xe84 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x89c0 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x89c0 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x914c >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x914c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x8c20 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x8c20 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9354 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9354 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9060 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9364 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9100 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x913c >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x90e0 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x90e4 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x90e8 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x90e0 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x90e4 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x90e8 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8bcc >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8b24 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88c4 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8e50 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8c0c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8e58 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8e5c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9508 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x950c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9494 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac0c >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac10 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac14 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xae00 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0xac08 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88d4 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88c8 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x88cc >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x89b0 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8b10 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x8a14 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9830 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9834 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9838 >> 2),
+       0x00000000,
+       (0x9c00 << 16) | (0x9a10 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8000 << 16) | (0x9874 >> 2),
+       0x00000000,
+       (0x8001 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8001 << 16) | (0x9874 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8040 << 16) | (0x9874 >> 2),
+       0x00000000,
+       (0x8041 << 16) | (0x9870 >> 2),
+       0x00000000,
+       (0x8041 << 16) | (0x9874 >> 2),
+       0x00000000,
+       0x00000000
+};
+
+static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+       const struct gfx_firmware_header_v1_0 *cp_hdr;
+       const struct rlc_firmware_header_v1_0 *rlc_hdr;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               chip_name = "tahiti";
+               break;
+       case CHIP_PITCAIRN:
+               chip_name = "pitcairn";
+               break;
+       case CHIP_VERDE:
+               chip_name = "verde";
+               break;
+       case CHIP_OLAND:
+               chip_name = "oland";
+               break;
+       case CHIP_HAINAN:
+               chip_name = "hainan";
+               break;
+       default:
+               BUG();
+       }
+
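+       /* SI parts reuse the radeon/ firmware directory for CP and RLC ucode */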
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+       err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+       if (err)
+               goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+       adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+       err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.me_fw);
+       if (err)
+               goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+       adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+       err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+       if (err)
+               goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+       adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+       err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+       if (err)
+               goto out;
+       rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+       adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+       adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+out:
+       if (err) {
+               printk(KERN_ERR
+                      "gfx6: Failed to load firmware \"%s\"\n",
+                      fw_name);
+               release_firmware(adev->gfx.pfp_fw);
+               adev->gfx.pfp_fw = NULL;
+               release_firmware(adev->gfx.me_fw);
+               adev->gfx.me_fw = NULL;
+               release_firmware(adev->gfx.ce_fw);
+               adev->gfx.ce_fw = NULL;
+               release_firmware(adev->gfx.rlc_fw);
+               adev->gfx.rlc_fw = NULL;
+       }
+       return err;
+}
+
+static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
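+       /* program GB_TILE_MODE0..31 with the per-asic tiling configuration,
+        * caching each value in adev->gfx.config.tile_mode_array
+        */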
+       const u32 num_tile_mode_states = 32;
+       u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+               break;
+       case 2:
+       default:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+               break;
+       case 4:
+               split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+               break;
+       }
+
+       if (adev->asic_type == CHIP_VERDE ||
+           adev->asic_type == CHIP_OLAND ||
+           adev->asic_type == CHIP_HAINAN) {
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 1:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 2:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 3:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 4:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 5:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 6:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 7:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 9:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 11:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 12:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 13:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 15:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 16:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 17:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 21:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 22:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 23:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 24:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 25:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+       } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:  /* non-AA compressed depth or any compressed stencil */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 1:  /* 2xAA/4xAA compressed depth only */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 2:  /* 8xAA compressed depth only */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 8:  /* 1D and 1D Array Surfaces */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 9:  /* Displayable maps. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 10:  /* Display 8bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 11:  /* Display 16bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 12:  /* Display 32bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 13:  /* Thin. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 14:  /* Thin 8 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 15:  /* Thin 16 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 16:  /* Thin 32 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 17:  /* Thin 64 bpp. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       case 21:  /* 8 bpp PRT. */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 22:  /* 16 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                               break;
+                       case 23:  /* 32 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 24:  /* 64 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               break;
+                       case 25:  /* 128 bpp PRT */
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                               break;
+                       default:
+                               gb_tile_moden = 0;
+                               break;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+       } else {
+               DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+       }
+}
+
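+/* Point GRBM-indexed register accesses at a specific SE/SH/instance;
+ * 0xffffffff for any argument selects broadcast writes at that level.
+ */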
+static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+                                 u32 sh_num, u32 instance)
+{
+       u32 data;
+
+       if (instance == 0xffffffff)
+               data = INSTANCE_BROADCAST_WRITES;
+       else
+               data = INSTANCE_INDEX(instance);
+
+       if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+               data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+       else if (se_num == 0xffffffff)
+               data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+       else if (sh_num == 0xffffffff)
+               data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+       else
+               data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+       WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 gfx_v6_0_create_bitmask(u32 bit_width)
+{
+       return (u32)(((u64)1 << bit_width) - 1);
+}
+
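+/* Return the bitmap of disabled render backends for the currently
+ * selected SE/SH: fused-off RBs ORed with user-disabled ones.
+ */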
+static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
+                                   u32 max_rb_num_per_se,
+                                   u32 sh_per_se)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_RB_BACKEND_DISABLE);
+       data &= BACKEND_DISABLE_MASK;
+       data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+       data >>= BACKEND_DISABLE_SHIFT;
+
+       mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+       return data & mask;
+}
+
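+/* Walk all SE/SH pairs to collect the disabled-RB bitmap, derive the
+ * global enabled-RB mask, and program PA_SC_RASTER_CONFIG per SE.
+ */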
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
+                             u32 se_num, u32 sh_per_se,
+                             u32 max_rb_num_per_se)
+{
+       int i, j;
+       u32 data, mask;
+       u32 disabled_rbs = 0;
+       u32 enabled_rbs = 0;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+                       data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
+                       disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+               }
+       }
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       mask = 1;
+       for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+               if (!(disabled_rbs & mask))
+                       enabled_rbs |= mask;
+               mask <<= 1;
+       }
+
+       adev->gfx.config.backend_enable_mask = enabled_rbs;
+       adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+               data = 0;
+               for (j = 0; j < sh_per_se; j++) {
+                       switch (enabled_rbs & 3) {
+                       case 1:
+                               data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 2:
+                               data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+                               break;
+                       case 3:
+                       default:
+                               data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+                               break;
+                       }
+                       enabled_rbs >>= 2;
+               }
+               WREG32(PA_SC_RASTER_CONFIG, data);
+       }
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+/*
+static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+}
+*/
+
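+/* Return the bitmap of active CUs in the currently selected SH;
+ * inactive bits come from fuses ORed with the user shader config.
+ */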
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+{
+       u32 data, mask;
+
+       data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+       data &= INACTIVE_CUS_MASK;
+       data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+       data >>= INACTIVE_CUS_SHIFT;
+
+       mask = gfx_v6_0_create_bitmask(cu_per_sh);
+
+       return ~data & mask;
+}
+
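+/* For each SE/SH, clear the enable bit of the first active CU in
+ * SPI_STATIC_THREAD_MGMT_3.
+ */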
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
+                        u32 se_num, u32 sh_per_se,
+                        u32 cu_per_sh)
+{
+       int i, j, k;
+       u32 data, mask;
+       u32 active_cu = 0;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (i = 0; i < se_num; i++) {
+               for (j = 0; j < sh_per_se; j++) {
+                       gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+                       data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+                       active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+
+                       /* clear the enable bit of the first active CU */
+                       for (k = 0; k < 16; k++) {
+                               mask = 1 << k;
+                               if (active_cu & mask) {
+                                       data &= ~mask;
+                                       WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+                                       break;
+                               }
+                       }
+               }
+       }
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
+
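+/* Core gfx block init: per-ASIC config limits, address/tiling setup,
+ * RB and SPI configuration, and assorted default register state.
+ */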
+static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+{
+       u32 gb_addr_config = 0;
+       u32 mc_shared_chmap, mc_arb_ramcfg;
+       u32 sx_debug_1;
+       u32 hdp_host_path_cntl;
+       u32 tmp;
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               adev->gfx.config.max_shader_engines = 2;
+               adev->gfx.config.max_tile_pipes = 12;
+               adev->gfx.config.max_cu_per_sh = 8;
+               adev->gfx.config.max_sh_per_se = 2;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 12;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_PITCAIRN:
+               adev->gfx.config.max_shader_engines = 2;
+               adev->gfx.config.max_tile_pipes = 8;
+               adev->gfx.config.max_cu_per_sh = 5;
+               adev->gfx.config.max_sh_per_se = 2;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+               break;
+
+       case CHIP_VERDE:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 5;
+               adev->gfx.config.max_sh_per_se = 2;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 4;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_OLAND:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 6;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 4;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 16;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_HAINAN:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 5;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 1;
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 16;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+       WREG32(SRBM_INT_CNTL, 1);
+       WREG32(SRBM_INT_ACK, 1);
+
+       WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+       mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+       mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+       adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+       adev->gfx.config.mem_max_burst_length_bytes = 256;
+       tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+       adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+       if (adev->gfx.config.mem_row_size_in_kb > 4)
+               adev->gfx.config.mem_row_size_in_kb = 4;
+       adev->gfx.config.shader_engine_tile_size = 32;
+       adev->gfx.config.num_gpus = 1;
+       adev->gfx.config.multi_gpu_tile_size = 64;
+
+       gb_addr_config &= ~ROW_SIZE_MASK;
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+       default:
+               gb_addr_config |= ROW_SIZE(0);
+               break;
+       case 2:
+               gb_addr_config |= ROW_SIZE(1);
+               break;
+       case 4:
+               gb_addr_config |= ROW_SIZE(2);
+               break;
+       }
+       adev->gfx.config.gb_addr_config = gb_addr_config;
+
+       WREG32(GB_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMIF_ADDR_CALC, gb_addr_config);
+       WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+#if 0
+       if (adev->has_uvd) {
+               WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+               WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+       }
+#endif
+       gfx_v6_0_tiling_mode_table_init(adev);
+
+       gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+                   adev->gfx.config.max_sh_per_se,
+                   adev->gfx.config.max_backends_per_se);
+
+       gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
+                    adev->gfx.config.max_sh_per_se,
+                    adev->gfx.config.max_cu_per_sh);
+
+       gfx_v6_0_get_cu_info(adev);
+
+       WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+                                    ROQ_IB2_START(0x2b)));
+       WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+       sx_debug_1 = RREG32(SX_DEBUG_1);
+       WREG32(SX_DEBUG_1, sx_debug_1);
+
+       WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+       WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
+                                SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
+                                SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
+                                SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+
+       WREG32(VGT_NUM_INSTANCES, 1);
+       WREG32(CP_PERFMON_CNTL, 0);
+       WREG32(SQ_CONFIG, 0);
+       WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+                                         FORCE_EOV_MAX_REZ_CNT(255)));
+
+       WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+              AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+       WREG32(VGT_GS_VERTEX_REUSE, 16);
+       WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+       WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+       WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+       WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+       WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+       WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+       hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+       WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+       WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+       udelay(50);
+}
+
+static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
+{
+       int i;
+
+       adev->gfx.scratch.num_reg = 7;
+       adev->gfx.scratch.reg_base = SCRATCH_REG0;
+       for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+               adev->gfx.scratch.free[i] = true;
+               adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+       }
+}
+
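+/* Basic ring sanity test: write a token to a scratch register through
+ * the CP and poll until the packet has executed.
+ */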
+static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+
+       r = amdgpu_ring_alloc(ring, 3);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+               amdgpu_gfx_scratch_free(adev, scratch);
+               return r;
+       }
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(scratch);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+                         ring->idx, scratch, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_gfx_scratch_free(adev, scratch);
+       return r;
+}
+
+static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       /* flush hdp cache */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, HDP_DEBUG0);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0x1);
+}
+
+static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+                                    u64 seq, unsigned flags)
+{
+       bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+       bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+       /* flush read cache over gart */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+       amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                         PACKET3_TC_ACTION_ENA |
+                         PACKET3_SH_KCACHE_ACTION_ENA |
+                         PACKET3_SH_ICACHE_ACTION_ENA);
+       amdgpu_ring_write(ring, 0xFFFFFFFF);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 10); /* poll interval */
+       /* EVENT_WRITE_EOP - flush caches, send int */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+       amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+                               DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+       amdgpu_ring_write(ring, lower_32_bits(seq));
+       amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib,
+                                 unsigned vm_id, bool ctx_switch)
+{
+       u32 header, control = 0;
+
+       /* insert SWITCH_BUFFER packet before first IB in the ring frame */
+       if (ctx_switch) {
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+       }
+
+       if (ib->flags & AMDGPU_IB_FLAG_CE)
+               header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+       else
+               header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+       control |= ib->length_dw | (vm_id << 24);
+
+       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                         (2 << 0) |
+#endif
+                         (ib->gpu_addr & 0xFFFFFFFC));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+       amdgpu_ring_write(ring, control);
+}
+
+/**
+ * gfx_v6_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (SI).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ib ib;
+       struct fence *f = NULL;
+       uint32_t scratch;
+       uint32_t tmp = 0;
+       long r;
+
+       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+               return r;
+       }
+       WREG32(scratch, 0xCAFEDEAD);
+       memset(&ib, 0, sizeof(ib));
+       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+               goto err1;
+       }
+       ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+       ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
+       ib.ptr[2] = 0xDEADBEEF;
+       ib.length_dw = 3;
+
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+       if (r)
+               goto err2;
+
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
+               goto err2;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+               goto err2;
+       }
+       tmp = RREG32(scratch);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
+       } else {
+               DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+                         scratch, tmp);
+               r = -EINVAL;
+       }
+
+err2:
+       amdgpu_ib_free(adev, &ib, NULL);
+       fence_put(f);
+err1:
+       amdgpu_gfx_scratch_free(adev, scratch);
+       return r;
+}
+
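+/* Start or halt the CP ME/PFP/CE engines; on halt, also mark all gfx
+ * and compute rings as not ready.
+ */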
+static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+       int i;
+
+       if (enable) {
+               WREG32(CP_ME_CNTL, 0);
+       } else {
+               WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+               WREG32(SCRATCH_UMSK, 0);
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+                       adev->gfx.gfx_ring[i].ready = false;
+               for (i = 0; i < adev->gfx.num_compute_rings; i++)
+                       adev->gfx.compute_ring[i].ready = false;
+       }
+       udelay(50);
+}
+
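+/* Halt the CP and load the PFP, CE and ME firmware images into their
+ * respective ucode RAMs one dword at a time.
+ */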
+static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+       unsigned i;
+       const struct gfx_firmware_header_v1_0 *pfp_hdr;
+       const struct gfx_firmware_header_v1_0 *ce_hdr;
+       const struct gfx_firmware_header_v1_0 *me_hdr;
+       const __le32 *fw_data;
+       u32 fw_size;
+
+       if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+               return -EINVAL;
+
+       gfx_v6_0_cp_gfx_enable(adev, false);
+       pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+       ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+       me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+
+       amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+       amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+       amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+
+       /* PFP */
+       fw_data = (const __le32 *)
+               (adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+
+       /* CE */
+       fw_data = (const __le32 *)
+               (adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+       WREG32(CP_CE_UCODE_ADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+       WREG32(CP_CE_UCODE_ADDR, 0);
+
+       /* ME */
+       fw_data = (const __le32 *)
+               (adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+       fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+       WREG32(CP_ME_RAM_WADDR, 0);
+       for (i = 0; i < fw_size; i++)
+               WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+       WREG32(CP_ME_RAM_WADDR, 0);
+
+       WREG32(CP_PFP_UCODE_ADDR, 0);
+       WREG32(CP_CE_UCODE_ADDR, 0);
+       WREG32(CP_ME_RAM_WADDR, 0);
+       WREG32(CP_ME_RAM_RADDR, 0);
+       return 0;
+}
+
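+/* Initialize the ME, set up the CE partition base, then emit the
+ * clear-state preamble and default context registers on the gfx ring.
+ */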
+static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+       struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+       int r, i;
+
+       r = amdgpu_ring_alloc(ring, 7 + 4);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+       amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       amdgpu_ring_write(ring, 0x1);
+       amdgpu_ring_write(ring, 0x0);
+       amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
+       amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+       amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+       amdgpu_ring_write(ring, 0xc000);
+       amdgpu_ring_write(ring, 0xe000);
+       amdgpu_ring_commit(ring);
+
+       gfx_v6_0_cp_gfx_enable(adev, true);
+
+       r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+       for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT) {
+                               amdgpu_ring_write(ring,
+                                                 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+                               amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+                               for (i = 0; i < ext->reg_count; i++)
+                                       amdgpu_ring_write(ring, ext->extent[i]);
+                       }
+               }
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       amdgpu_ring_write(ring, 0x00000316);
+       amdgpu_ring_write(ring, 0x0000000e);
+       amdgpu_ring_write(ring, 0x00000010);
+
+       amdgpu_ring_commit(ring);
+
+       return 0;
+}
+
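+/* Program gfx ring 0 (size, rptr/wptr, rptr writeback address, base),
+ * start the CP and verify it with a ring test.
+ */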
+static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 tmp;
+       u32 rb_bufsz;
+       int r;
+       u64 rptr_addr;
+
+       WREG32(CP_SEM_WAIT_TIMER, 0x0);
+       WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+       /* Set the write pointer delay */
+       WREG32(CP_RB_WPTR_DELAY, 0);
+
+       WREG32(CP_DEBUG, 0);
+       WREG32(SCRATCH_ADDR, 0);
+
+       /* ring 0 - compute and gfx */
+       /* Set ring buffer size */
+       ring = &adev->gfx.gfx_ring[0];
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+
+#ifdef __BIG_ENDIAN
+       tmp |= BUF_SWAP_32BIT;
+#endif
+       WREG32(CP_RB0_CNTL, tmp);
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+       ring->wptr = 0;
+       WREG32(CP_RB0_WPTR, ring->wptr);
+
+       /* set the wb address whether it's enabled or not */
+       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+       WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+       WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+       WREG32(SCRATCH_UMSK, 0);
+
+       mdelay(1);
+       WREG32(CP_RB0_CNTL, tmp);
+
+       WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+       /* start the rings */
+       gfx_v6_0_cp_gfx_start(adev);
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       return 0;
+}
+
+static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->gfx.gfx_ring[0])
+               return RREG32(CP_RB0_WPTR);
+       else if (ring == &adev->gfx.compute_ring[0])
+               return RREG32(CP_RB1_WPTR);
+       else if (ring == &adev->gfx.compute_ring[1])
+               return RREG32(CP_RB2_WPTR);
+       else
+               BUG();
+}
+
+static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WREG32(CP_RB0_WPTR, ring->wptr);
+       (void)RREG32(CP_RB0_WPTR);
+}
+
+static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       if (ring == &adev->gfx.compute_ring[0]) {
+               WREG32(CP_RB1_WPTR, ring->wptr);
+               (void)RREG32(CP_RB1_WPTR);
+       } else if (ring == &adev->gfx.compute_ring[1]) {
+               WREG32(CP_RB2_WPTR, ring->wptr);
+               (void)RREG32(CP_RB2_WPTR);
+       } else {
+               BUG();
+       }
+}
+
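+/* Bring up the two compute rings on CP_RB1/CP_RB2 with the same
+ * sequence as the gfx ring, then ring-test both.
+ */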
+static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 tmp;
+       u32 rb_bufsz;
+       int r;
+       u64 rptr_addr;
+
+       /* ring1  - compute only */
+       /* Set ring buffer size */
+
+       ring = &adev->gfx.compute_ring[0];
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+       tmp |= BUF_SWAP_32BIT;
+#endif
+       WREG32(CP_RB1_CNTL, tmp);
+
+       WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+       ring->wptr = 0;
+       WREG32(CP_RB1_WPTR, ring->wptr);
+
+       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+       WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+       WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+       mdelay(1);
+       WREG32(CP_RB1_CNTL, tmp);
+       WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+       ring = &adev->gfx.compute_ring[1];
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+       tmp |= BUF_SWAP_32BIT;
+#endif
+       WREG32(CP_RB2_CNTL, tmp);
+
+       WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+       ring->wptr = 0;
+       WREG32(CP_RB2_WPTR, ring->wptr);
+       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+       WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+       WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+       mdelay(1);
+       WREG32(CP_RB2_CNTL, tmp);
+       WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+       adev->gfx.compute_ring[0].ready = true;
+       adev->gfx.compute_ring[1].ready = true;
+
+       r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
+       if (r) {
+               adev->gfx.compute_ring[0].ready = false;
+               return r;
+       }
+
+       r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
+       if (r) {
+               adev->gfx.compute_ring[1].ready = false;
+               return r;
+       }
+
+       return 0;
+}
+
+static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+       gfx_v6_0_cp_gfx_enable(adev, enable);
+}
+
+static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
+{
+       return gfx_v6_0_cp_gfx_load_microcode(adev);
+}
+
+static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+                                              bool enable)
+{
+       u32 tmp = RREG32(CP_INT_CNTL_RING0);
+       u32 mask;
+       int i;
+
+       if (enable)
+               tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       else
+               tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       WREG32(CP_INT_CNTL_RING0, tmp);
+
+       if (!enable) {
+               /* read a gfx register */
+               tmp = RREG32(DB_DEPTH_INFO);
+
+               mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+                               break;
+                       udelay(1);
+               }
+       }
+}
+
+static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
+{
+       int r;
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+       r = gfx_v6_0_cp_load_microcode(adev);
+       if (r)
+               return r;
+
+       r = gfx_v6_0_cp_gfx_resume(adev);
+       if (r)
+               return r;
+       r = gfx_v6_0_cp_compute_resume(adev);
+       if (r)
+               return r;
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+       return 0;
+}
+
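+/* Stall the ring with WAIT_REG_MEM until the last emitted fence has
+ * signalled; on gfx, also switch buffers to resync CE with ME.
+ */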
+static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+                                WAIT_REG_MEM_FUNCTION(3) | /* equal */
+                                WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+       amdgpu_ring_write(ring, seq);
+       amdgpu_ring_write(ring, 0xffffffff);
+       amdgpu_ring_write(ring, 4); /* poll interval */
+
+       if (usepfp) {
+               /* sync CE with ME to prevent CE from fetching CEIB before the context switch completes */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+       }
+}
+
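+/* Write the new page table base for this VM id, request a TLB
+ * invalidate and wait for it to complete; on gfx, resync PFP and CE.
+ */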
+static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                       unsigned vm_id, uint64_t pd_addr)
+{
+       int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+       /* write new base address */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       if (vm_id < 8)
+               amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       else
+               amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       /* bits 0-15 are the VM contexts0-15 */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+                                WRITE_DATA_DST_SEL(0)));
+       amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 1 << vm_id);
+
+       /* wait for the invalidate to complete */
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+                                WAIT_REG_MEM_ENGINE(0))); /* me */
+       amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, 0); /* ref */
+       amdgpu_ring_write(ring, 0); /* mask */
+       amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+       if (usepfp) {
+               /* sync PFP to ME, otherwise we might get invalid PFP reads */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               amdgpu_ring_write(ring, 0x0);
+
+               /* sync CE with ME to prevent CE from fetching CEIB before the context switch completes */
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+               amdgpu_ring_write(ring, 0);
+       }
+}
+
+static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->gfx.rlc.save_restore_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
+               adev->gfx.rlc.save_restore_obj = NULL;
+       }
+
+       if (adev->gfx.rlc.clear_state_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+               adev->gfx.rlc.clear_state_obj = NULL;
+       }
+
+       if (adev->gfx.rlc.cp_table_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+               adev->gfx.rlc.cp_table_obj = NULL;
+       }
+}
+
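+/* allocate and pin the RLC buffers in VRAM: the save/restore block is
+ * filled from the SI register list, while the clear-state block starts
+ * with a small descriptor (CSB address and size) at offset 0 and holds
+ * the CSB itself from offset 256 onwards
+ */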
+static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+{
+       const u32 *src_ptr;
+       volatile u32 *dst_ptr;
+       u32 dws, i;
+       u64 reg_list_mc_addr;
+       const struct cs_section_def *cs_data;
+       int r;
+
+       adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
+       adev->gfx.rlc.reg_list_size =
+                       (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+
+       adev->gfx.rlc.cs_data = si_cs_data;
+       src_ptr = adev->gfx.rlc.reg_list;
+       dws = adev->gfx.rlc.reg_list_size;
+       cs_data = adev->gfx.rlc.cs_data;
+
+       if (src_ptr) {
+               /* save restore block */
+               if (adev->gfx.rlc.save_restore_obj == NULL) {
+                       r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, NULL,
+                                            &adev->gfx.rlc.save_restore_obj);
+                       if (r) {
+                               dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+                               return r;
+                       }
+               }
+
+               r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+               if (unlikely(r != 0)) {
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
+                                 &adev->gfx.rlc.save_restore_gpu_addr);
+               if (r) {
+                       amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+                       dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+
+               r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               /* write the sr buffer */
+               dst_ptr = adev->gfx.rlc.sr_ptr;
+               for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+                       dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+               amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+       }
+
+       if (cs_data) {
+               /* clear state block */
+               adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
+               dws = adev->gfx.rlc.clear_state_size + (256 / 4);
+
+               if (adev->gfx.rlc.clear_state_obj == NULL) {
+                       r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, NULL,
+                                            &adev->gfx.rlc.clear_state_obj);
+                       if (r) {
+                               dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+                               gfx_v6_0_rlc_fini(adev);
+                               return r;
+                       }
+               }
+               r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+               if (unlikely(r != 0)) {
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+                                 &adev->gfx.rlc.clear_state_gpu_addr);
+               if (r) {
+                       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+                       dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+
+               r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+                       gfx_v6_0_rlc_fini(adev);
+                       return r;
+               }
+               /* set up the cs buffer */
+               dst_ptr = adev->gfx.rlc.cs_ptr;
+               reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
+               dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+               dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+               dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
+               gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
+               amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+       }
+
+       return 0;
+}
+
+static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+{
+       u32 tmp;
+
+       tmp = RREG32(RLC_LB_CNTL);
+       if (enable)
+               tmp |= LOAD_BALANCE_ENABLE;
+       else
+               tmp &= ~LOAD_BALANCE_ENABLE;
+       WREG32(RLC_LB_CNTL, tmp);
+
+       if (!enable) {
+               gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               WREG32(SPI_LB_CU_MASK, 0x00ff);
+       }
+}
+
+static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+                       break;
+               udelay(1);
+       }
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+                       break;
+               udelay(1);
+       }
+}
+
+static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
+{
+       u32 tmp;
+
+       tmp = RREG32(RLC_CNTL);
+       if (tmp != rlc)
+               WREG32(RLC_CNTL, rlc);
+}
+
+static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_CNTL);
+
+       if (data & RLC_ENABLE) {
+               data &= ~RLC_ENABLE;
+               WREG32(RLC_CNTL, data);
+
+               gfx_v6_0_wait_for_rlc_serdes(adev);
+       }
+
+       return orig;
+}
+
+static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
+{
+       WREG32(RLC_CNTL, 0);
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+       gfx_v6_0_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
+{
+       WREG32(RLC_CNTL, RLC_ENABLE);
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+       udelay(50);
+}
+
+static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(GRBM_SOFT_RESET);
+
+       tmp |= SOFT_RESET_RLC;
+       WREG32(GRBM_SOFT_RESET, tmp);
+       udelay(50);
+       tmp &= ~SOFT_RESET_RLC;
+       WREG32(GRBM_SOFT_RESET, tmp);
+       udelay(50);
+}
+
+static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
+       /* Enable LBPW only for DDR3 */
+       tmp = RREG32(MC_SEQ_MISC0);
+       if ((tmp & 0xF0000000) == 0xB0000000)
+               return true;
+       return false;
+}
+
+static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
+{
+}
+
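+/* (re)start the RLC: halt and soft-reset it, program PG/CG, reset the
+ * load-balancer registers, then stream the RLC microcode one dword at a
+ * time through RLC_UCODE_ADDR/RLC_UCODE_DATA before releasing it
+ */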
+static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
+{
+       u32 i;
+       const struct rlc_firmware_header_v1_0 *hdr;
+       const __le32 *fw_data;
+       u32 fw_size;
+
+       if (!adev->gfx.rlc_fw)
+               return -EINVAL;
+
+       gfx_v6_0_rlc_stop(adev);
+       gfx_v6_0_rlc_reset(adev);
+       gfx_v6_0_init_pg(adev);
+       gfx_v6_0_init_cg(adev);
+
+       WREG32(RLC_RL_BASE, 0);
+       WREG32(RLC_RL_SIZE, 0);
+       WREG32(RLC_LB_CNTL, 0);
+       WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+       WREG32(RLC_LB_CNTR_INIT, 0);
+       WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+       WREG32(RLC_MC_CNTL, 0);
+       WREG32(RLC_UCODE_CNTL, 0);
+
+       hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+       fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+       fw_data = (const __le32 *)
+               (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+       amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+       for (i = 0; i < fw_size; i++) {
+               WREG32(RLC_UCODE_ADDR, i);
+               WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+       }
+       WREG32(RLC_UCODE_ADDR, 0);
+
+       gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
+       gfx_v6_0_rlc_start(adev);
+
+       return 0;
+}
+
+static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig, tmp;
+
+       orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+               gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+               WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+
+               tmp = gfx_v6_0_halt_rlc(adev);
+
+               WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+               WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+               WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+
+               gfx_v6_0_wait_for_rlc_serdes(adev);
+               gfx_v6_0_update_rlc(adev, tmp);
+
+               WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+
+               data |= CGCG_EN | CGLS_EN;
+       } else {
+               gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+               RREG32(CB_CGTT_SCLK_CTRL);
+               RREG32(CB_CGTT_SCLK_CTRL);
+               RREG32(CB_CGTT_SCLK_CTRL);
+               RREG32(CB_CGTT_SCLK_CTRL);
+
+               data &= ~(CGCG_EN | CGLS_EN);
+       }
+
+       if (orig != data)
+               WREG32(RLC_CGCG_CGLS_CTRL, data);
+}
+
+static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig, tmp = 0;
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+               orig = data = RREG32(CGTS_SM_CTRL_REG);
+               data = 0x96940200;
+               if (orig != data)
+                       WREG32(CGTS_SM_CTRL_REG, data);
+
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+                       orig = data = RREG32(CP_MEM_SLP_CNTL);
+                       data |= CP_MEM_LS_EN;
+                       if (orig != data)
+                               WREG32(CP_MEM_SLP_CNTL, data);
+               }
+
+               orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+               data &= 0xffffffc0;
+               if (orig != data)
+                       WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+               tmp = gfx_v6_0_halt_rlc(adev);
+
+               WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+               WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+               WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+
+               gfx_v6_0_update_rlc(adev, tmp);
+       } else {
+               orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+               data |= 0x00000003;
+               if (orig != data)
+                       WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+               data = RREG32(CP_MEM_SLP_CNTL);
+               if (data & CP_MEM_LS_EN) {
+                       data &= ~CP_MEM_LS_EN;
+                       WREG32(CP_MEM_SLP_CNTL, data);
+               }
+               orig = data = RREG32(CGTS_SM_CTRL_REG);
+               data |= LS_OVERRIDE | OVERRIDE;
+               if (orig != data)
+                       WREG32(CGTS_SM_CTRL_REG, data);
+
+               tmp = gfx_v6_0_halt_rlc(adev);
+
+               WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+               WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+               WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+
+               gfx_v6_0_update_rlc(adev, tmp);
+       }
+}
+
+/*
+static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
+                              bool enable)
+{
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+       if (enable) {
+               gfx_v6_0_enable_mgcg(adev, true);
+               gfx_v6_0_enable_cgcg(adev, true);
+       } else {
+               gfx_v6_0_enable_cgcg(adev, false);
+               gfx_v6_0_enable_mgcg(adev, false);
+       }
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+}
+*/
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
+                                               bool enable)
+{
+}
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
+                                               bool enable)
+{
+}
+
+static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_PG_CNTL);
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
+               data &= ~0x8000;
+       else
+               data |= 0x8000;
+       if (orig != data)
+               WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+{
+}
+
+/*
+static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
+{
+       const __le32 *fw_data;
+       volatile u32 *dst_ptr;
+       int me, i, max_me = 4;
+       u32 bo_offset = 0;
+       u32 table_offset, table_size;
+
+       if (adev->asic_type == CHIP_KAVERI)
+               max_me = 5;
+
+       if (adev->gfx.rlc.cp_table_ptr == NULL)
+               return;
+
+       dst_ptr = adev->gfx.rlc.cp_table_ptr;
+       for (me = 0; me < max_me; me++) {
+               if (me == 0) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.ce_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 1) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.pfp_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 2) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.me_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 3) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec2_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               }
+
+               for (i = 0; i < table_size; i++) {
+                       dst_ptr[bo_offset + i] =
+                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+               }
+
+               bo_offset += table_size;
+       }
+}
+*/
+
+static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+                                    bool enable)
+{
+       u32 tmp;
+
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+               tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
+               WREG32(RLC_TTOP_D, tmp);
+
+               tmp = RREG32(RLC_PG_CNTL);
+               tmp |= GFX_PG_ENABLE;
+               WREG32(RLC_PG_CNTL, tmp);
+
+               tmp = RREG32(RLC_AUTO_PG_CTRL);
+               tmp |= AUTO_PG_EN;
+               WREG32(RLC_AUTO_PG_CTRL, tmp);
+       } else {
+               tmp = RREG32(RLC_AUTO_PG_CTRL);
+               tmp &= ~AUTO_PG_EN;
+               WREG32(RLC_AUTO_PG_CTRL, tmp);
+
+               tmp = RREG32(DB_RENDER_CONTROL);
+       }
+}
+
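+/* the disabled-CU bits sit in the upper 16 bits of both shader array
+ * config registers; OR them, shift down and invert to get the bitmap
+ * of active CUs for the selected SE/SH
+ */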
+static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+                                        u32 se, u32 sh)
+{
+       u32 mask = 0, tmp, tmp1;
+       int i;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
+       tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+       tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+       gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+
+       tmp &= 0xffff0000;
+       tmp |= tmp1;
+       tmp >>= 16;
+
+       for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
+               mask <<= 1;
+               mask |= 1;
+       }
+
+       return (~tmp) & mask;
+}
+
+static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
+{
+       u32 i, j, k, active_cu_number = 0;
+       u32 mask, counter, cu_bitmap;
+       u32 tmp = 0;
+
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       mask = 1;
+                       cu_bitmap = 0;
+                       counter = 0;
+                       for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+                               if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
+                                       if (counter < 2)
+                                               cu_bitmap |= mask;
+                                       counter++;
+                               }
+                               mask <<= 1;
+                       }
+
+                       active_cu_number += counter;
+                       tmp |= (cu_bitmap << (i * 16 + j * 8));
+               }
+       }
+
+       WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+       tmp = RREG32(RLC_MAX_PG_CU);
+       tmp &= ~MAX_PU_CU_MASK;
+       tmp |= MAX_PU_CU(active_cu_number);
+       WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
+                                           bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_PG_CNTL);
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
+               data |= STATIC_PER_CU_PG_ENABLE;
+       else
+               data &= ~STATIC_PER_CU_PG_ENABLE;
+       if (orig != data)
+               WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
+                                            bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(RLC_PG_CNTL);
+       if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
+               data |= DYN_PER_CU_PG_ENABLE;
+       else
+               data &= ~DYN_PER_CU_PG_ENABLE;
+       if (orig != data)
+               WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
+       WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+
+       tmp = RREG32(RLC_PG_CNTL);
+       tmp |= GFX_PG_SRC;
+       WREG32(RLC_PG_CNTL, tmp);
+
+       WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+
+       tmp = RREG32(RLC_AUTO_PG_CTRL);
+       tmp &= ~GRBM_REG_SGIT_MASK;
+       tmp |= GRBM_REG_SGIT(0x700);
+       tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
+       WREG32(RLC_AUTO_PG_CTRL, tmp);
+}
+
+static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
+{
+       gfx_v6_0_enable_gfx_cgpg(adev, enable);
+       gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
+       gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
+}
+
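+/* dword count of the clear-state buffer: begin/end preamble (2 + 2),
+ * context control (3), one 2-dword SET_CONTEXT_REG header plus payload
+ * per extent, PA_SC_RASTER_CONFIG (3) and a final CLEAR_STATE (2)
+ */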
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
+{
+       u32 count = 0;
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+
+       if (adev->gfx.rlc.cs_data == NULL)
+               return 0;
+
+       /* begin clear state */
+       count += 2;
+       /* context control state */
+       count += 3;
+
+       for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT)
+                               count += 2 + ext->reg_count;
+                       else
+                               return 0;
+               }
+       }
+       /* pa_sc_raster_config */
+       count += 3;
+       /* end clear state */
+       count += 2;
+       /* clear state */
+       count += 2;
+
+       return count;
+}
+
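+/* emit the clear-state buffer contents; the layout here must stay in
+ * sync with the dword accounting in gfx_v6_0_get_csb_size()
+ */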
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
+                                   volatile u32 *buffer)
+{
+       u32 count = 0, i;
+       const struct cs_section_def *sect = NULL;
+       const struct cs_extent_def *ext = NULL;
+
+       if (adev->gfx.rlc.cs_data == NULL)
+               return;
+       if (buffer == NULL)
+               return;
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       buffer[count++] = cpu_to_le32(0x80000000);
+       buffer[count++] = cpu_to_le32(0x80000000);
+
+       for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+               for (ext = sect->section; ext->extent != NULL; ++ext) {
+                       if (sect->id == SECT_CONTEXT) {
+                               buffer[count++] =
+                                       cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+                               buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+                               for (i = 0; i < ext->reg_count; i++)
+                                       buffer[count++] = cpu_to_le32(ext->extent[i]);
+                       } else {
+                               return;
+                       }
+               }
+       }
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+       buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+               buffer[count++] = cpu_to_le32(0x2a00126a);
+               break;
+       case CHIP_VERDE:
+               buffer[count++] = cpu_to_le32(0x0000124a);
+               break;
+       case CHIP_OLAND:
+               buffer[count++] = cpu_to_le32(0x00000082);
+               break;
+       case CHIP_HAINAN:
+               buffer[count++] = cpu_to_le32(0x00000000);
+               break;
+       default:
+               buffer[count++] = cpu_to_le32(0x00000000);
+               break;
+       }
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+       buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+       buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+       buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
+{
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG |
+                             AMD_PG_SUPPORT_CP |
+                             AMD_PG_SUPPORT_GDS |
+                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+               gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
+               gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+                       gfx_v6_0_init_gfx_cgpg(adev);
+                       gfx_v6_0_enable_cp_pg(adev, true);
+                       gfx_v6_0_enable_gds_pg(adev, true);
+               } else {
+                       WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+                       WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+               }
+               gfx_v6_0_init_ao_cu_mask(adev);
+               gfx_v6_0_update_gfx_pg(adev, true);
+       } else {
+               WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+               WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+       }
+}
+
+static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
+{
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG |
+                             AMD_PG_SUPPORT_CP |
+                             AMD_PG_SUPPORT_GDS |
+                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+               gfx_v6_0_update_gfx_pg(adev, false);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+                       gfx_v6_0_enable_cp_pg(adev, false);
+                       gfx_v6_0_enable_gds_pg(adev, false);
+               }
+       }
+}
+
+static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+       uint64_t clock;
+
+       mutex_lock(&adev->gfx.gpu_clock_mutex);
+       WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+       clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+               ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+       mutex_unlock(&adev->gfx.gpu_clock_mutex);
+       return clock;
+}
+
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       amdgpu_ring_write(ring, 0x80000000);
+       amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               6; /* gfx_v6_0_ring_emit_ib */
+}
+
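+/* worst-case number of dwords one submission can emit outside of the
+ * IBs themselves, used to reserve ring space up front
+ */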
+static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               5 + /* gfx_v6_0_ring_emit_hdp_flush */
+               5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+               14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+               7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+               17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+               3; /* gfx_v6_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               5 + /* gfx_v6_0_ring_emit_hdp_flush */
+               5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+               7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+               17 + /* gfx_v6_0_ring_emit_vm_flush */
+               14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
+       .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
+       .select_se_sh = &gfx_v6_0_select_se_sh,
+};
+
+static int gfx_v6_0_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
+       adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
+       adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+       gfx_v6_0_set_ring_funcs(adev);
+       gfx_v6_0_set_irq_funcs(adev);
+
+       return 0;
+}
+
+static int gfx_v6_0_sw_init(void *handle)
+{
+       struct amdgpu_ring *ring;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i, r;
+
+       r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+       if (r)
+               return r;
+
+       gfx_v6_0_scratch_init(adev);
+
+       r = gfx_v6_0_init_microcode(adev);
+       if (r) {
+               DRM_ERROR("Failed to load gfx firmware!\n");
+               return r;
+       }
+
+       r = gfx_v6_0_rlc_init(adev);
+       if (r) {
+               DRM_ERROR("Failed to init rlc BOs!\n");
+               return r;
+       }
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+               ring = &adev->gfx.gfx_ring[i];
+               ring->ring_obj = NULL;
+               sprintf(ring->name, "gfx");
+               r = amdgpu_ring_init(adev, ring, 1024,
+                                    0x80000000, 0xf,
+                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+                                    AMDGPU_RING_TYPE_GFX);
+               if (r)
+                       return r;
+       }
+
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               unsigned irq_type;
+
+               if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+                       DRM_ERROR("Too many (%d) compute rings!\n", i);
+                       break;
+               }
+               ring = &adev->gfx.compute_ring[i];
+               ring->ring_obj = NULL;
+               ring->use_doorbell = false;
+               ring->doorbell_index = 0;
+               ring->me = 1;
+               ring->pipe = i;
+               ring->queue = i;
+               sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+               irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+               r = amdgpu_ring_init(adev, ring, 1024,
+                                    0x80000000, 0xf,
+                                    &adev->gfx.eop_irq, irq_type,
+                                    AMDGPU_RING_TYPE_COMPUTE);
+               if (r)
+                       return r;
+       }
+
+       return r;
+}
+
+static int gfx_v6_0_sw_fini(void *handle)
+{
+       int i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+       amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+       amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+               amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+       for (i = 0; i < adev->gfx.num_compute_rings; i++)
+               amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+       gfx_v6_0_rlc_fini(adev);
+
+       return 0;
+}
+
+static int gfx_v6_0_hw_init(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gfx_v6_0_gpu_init(adev);
+
+       r = gfx_v6_0_rlc_resume(adev);
+       if (r)
+               return r;
+
+       r = gfx_v6_0_cp_resume(adev);
+       if (r)
+               return r;
+
+       adev->gfx.ce_ram_size = 0x8000;
+
+       return r;
+}
+
+static int gfx_v6_0_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gfx_v6_0_cp_enable(adev, false);
+       gfx_v6_0_rlc_stop(adev);
+       gfx_v6_0_fini_pg(adev);
+
+       return 0;
+}
+
+static int gfx_v6_0_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return gfx_v6_0_hw_fini(adev);
+}
+
+static int gfx_v6_0_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return gfx_v6_0_hw_init(adev);
+}
+
+static bool gfx_v6_0_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return !(RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK);
+}
+
+static int gfx_v6_0_wait_for_idle(void *handle)
+{
+       unsigned i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (gfx_v6_0_is_idle(handle))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int gfx_v6_0_soft_reset(void *handle)
+{
+       return 0;
+}
+
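+/* EOP (fence) interrupts are gated by the TIME_STAMP enable bit in the
+ * per-ring CP_INT_CNTL registers: RING0 for gfx, RING1/RING2 for the
+ * two compute rings
+ */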
+static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+                                                enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+}
+
+static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+                                                    int ring,
+                                                    enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               if (ring == 0) {
+                       cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+                       cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+                       WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+               } else {
+                       cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+                       cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+                       WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+               }
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               if (ring == 0) {
+                       cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+                       cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+                       WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+               } else {
+                       cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+                       cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+                       WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+               }
+               break;
+       default:
+               BUG();
+               break;
+       }
+}
+
+static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *src,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+                                             struct amdgpu_irq_src *src,
+                                             unsigned type,
+                                             enum amdgpu_interrupt_state state)
+{
+       u32 cp_int_cntl;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+               cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+               WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+                                           struct amdgpu_irq_src *src,
+                                           unsigned type,
+                                           enum amdgpu_interrupt_state state)
+{
+       switch (type) {
+       case AMDGPU_CP_IRQ_GFX_EOP:
+               gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+               gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
+               break;
+       case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+               gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
+                           struct amdgpu_irq_src *source,
+                           struct amdgpu_iv_entry *entry)
+{
+       switch (entry->ring_id) {
+       case 0:
+               amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+               break;
+       case 1:
+       case 2:
+               amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
+                                struct amdgpu_irq_src *source,
+                                struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal register access in command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
+                                 struct amdgpu_irq_src *source,
+                                 struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal instruction in command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
+static int gfx_v6_0_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       bool gate = false;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (state == AMD_CG_STATE_GATE)
+               gate = true;
+
+       gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+       if (gate) {
+               gfx_v6_0_enable_mgcg(adev, true);
+               gfx_v6_0_enable_cgcg(adev, true);
+       } else {
+               gfx_v6_0_enable_cgcg(adev, false);
+               gfx_v6_0_enable_mgcg(adev, false);
+       }
+       gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+       return 0;
+}
+
+static int gfx_v6_0_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       bool gate = false;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (state == AMD_PG_STATE_GATE)
+               gate = true;
+
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG |
+                             AMD_PG_SUPPORT_CP |
+                             AMD_PG_SUPPORT_GDS |
+                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+               gfx_v6_0_update_gfx_pg(adev, gate);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+                       gfx_v6_0_enable_cp_pg(adev, gate);
+                       gfx_v6_0_enable_gds_pg(adev, gate);
+               }
+       }
+
+       return 0;
+}
+
+const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+       .name = "gfx_v6_0",
+       .early_init = gfx_v6_0_early_init,
+       .late_init = NULL,
+       .sw_init = gfx_v6_0_sw_init,
+       .sw_fini = gfx_v6_0_sw_fini,
+       .hw_init = gfx_v6_0_hw_init,
+       .hw_fini = gfx_v6_0_hw_fini,
+       .suspend = gfx_v6_0_suspend,
+       .resume = gfx_v6_0_resume,
+       .is_idle = gfx_v6_0_is_idle,
+       .wait_for_idle = gfx_v6_0_wait_for_idle,
+       .soft_reset = gfx_v6_0_soft_reset,
+       .set_clockgating_state = gfx_v6_0_set_clockgating_state,
+       .set_powergating_state = gfx_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+       .get_rptr = gfx_v6_0_ring_get_rptr,
+       .get_wptr = gfx_v6_0_ring_get_wptr,
+       .set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+       .parse_cs = NULL,
+       .emit_ib = gfx_v6_0_ring_emit_ib,
+       .emit_fence = gfx_v6_0_ring_emit_fence,
+       .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+       .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+       .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+       .test_ring = gfx_v6_0_ring_test_ring,
+       .test_ib = gfx_v6_0_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+       .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+       .get_rptr = gfx_v6_0_ring_get_rptr,
+       .get_wptr = gfx_v6_0_ring_get_wptr,
+       .set_wptr = gfx_v6_0_ring_set_wptr_compute,
+       .parse_cs = NULL,
+       .emit_ib = gfx_v6_0_ring_emit_ib,
+       .emit_fence = gfx_v6_0_ring_emit_fence,
+       .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+       .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+       .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+       .test_ring = gfx_v6_0_ring_test_ring,
+       .test_ib = gfx_v6_0_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
+};
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+               adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
+       for (i = 0; i < adev->gfx.num_compute_rings; i++)
+               adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
+       .set = gfx_v6_0_set_eop_interrupt_state,
+       .process = gfx_v6_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
+       .set = gfx_v6_0_set_priv_reg_fault_state,
+       .process = gfx_v6_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
+       .set = gfx_v6_0_set_priv_inst_fault_state,
+       .process = gfx_v6_0_priv_inst_irq,
+};
+
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+       adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
+
+       adev->gfx.priv_reg_irq.num_types = 1;
+       adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
+
+       adev->gfx.priv_inst_irq.num_types = 1;
+       adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+{
+       int i, j, k, counter, active_cu_number = 0;
+       u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+       struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+
+       memset(cu_info, 0, sizeof(*cu_info));
+
+       for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+               for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       mask = 1;
+                       ao_bitmap = 0;
+                       counter = 0;
+                       bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+                       cu_info->bitmap[i][j] = bitmap;
+
+                       for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+                               if (bitmap & mask) {
+                                       if (counter < 2)
+                                               ao_bitmap |= mask;
+                                               counter++;
+                               }
+                               mask <<= 1;
+                       }
+                       active_cu_number += counter;
+                       ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+               }
+       }
+
+       cu_info->number = active_cu_number;
+       cu_info->ao_cu_mask = ao_cu_mask;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
new file mode 100644 (file)
index 0000000..b9657e7
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V6_0_H__
+#define __GFX_V6_0_H__
+
+extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+
+#endif
index f4fbec3..90102f1 100644 (file)
@@ -2096,6 +2096,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+       uint32_t dw2 = 0;
+
+       dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
+       if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+               /* set load_global_config & load_global_uconfig */
+               dw2 |= 0x8001;
+               /* set load_cs_sh_regs */
+               dw2 |= 0x01000000;
+               /* set load_per_context_state & load_gfx_sh_regs */
+               dw2 |= 0x10002;
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       amdgpu_ring_write(ring, dw2);
+       amdgpu_ring_write(ring, 0);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *
@@ -2443,7 +2462,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
        return 0;
 }
 
-static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        return ring->adev->wb.wb[ring->rptr_offs];
 }
@@ -2463,11 +2482,6 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
        (void)RREG32(mmCP_RB0_WPTR);
 }
 
-static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-       return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
        /* XXX check if swapping is necessary on BE */
@@ -4176,6 +4190,41 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v7_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v7_0_ring_emit_gds_switch */
+               7 + /* gfx_v7_0_ring_emit_hdp_flush */
+               5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+               12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+               7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+               17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+               3; /* gfx_v7_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v7_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v7_0_ring_emit_gds_switch */
+               7 + /* gfx_v7_0_ring_emit_hdp_flush */
+               5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+               7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+               17 + /* gfx_v7_0_ring_emit_vm_flush */
+               7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v7_0_select_se_sh,
@@ -4495,9 +4544,9 @@ static int gfx_v7_0_sw_fini(void *handle)
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+       amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4928,7 +4977,7 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
-       .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
+       .get_rptr = gfx_v7_0_ring_get_rptr,
        .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
@@ -4943,10 +4992,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .test_ib = gfx_v7_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+       .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
+       .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
-       .get_rptr = gfx_v7_0_ring_get_rptr_compute,
+       .get_rptr = gfx_v7_0_ring_get_rptr,
        .get_wptr = gfx_v7_0_ring_get_wptr_compute,
        .set_wptr = gfx_v7_0_ring_set_wptr_compute,
        .parse_cs = NULL,
@@ -4961,6 +5013,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .test_ib = gfx_v7_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
+       .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
index c6a63c2..47e270a 100644 (file)
@@ -2113,9 +2113,9 @@ static int gfx_v8_0_sw_fini(void *handle)
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
-       amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+       amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -3866,7 +3866,7 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
        }
 }
 
-void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
 {
        WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
 
@@ -5835,7 +5835,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
        return 0;
 }
 
-static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        return ring->adev->wb.wb[ring->rptr_offs];
 }
@@ -5915,12 +5915,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 {
        u32 header, control = 0;
 
-       /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (ctx_switch) {
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-       }
-
        if (ib->flags & AMDGPU_IB_FLAG_CE)
                header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
        else
@@ -5990,14 +5984,6 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, 0xffffffff);
        amdgpu_ring_write(ring, 4); /* poll interval */
-
-       if (usepfp) {
-               /* synce CE with ME to prevent CE fetch CEIB before context switch done */
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-       }
 }
 
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -6005,6 +5991,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
        int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
 
+       /* GFX8 emits a 128-dword NOP to keep the DE from doing the vm_flush before the CE finishes the CEIB */
+       if (usepfp)
+               amdgpu_ring_insert_nop(ring, 128);
+
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                                 WRITE_DATA_DST_SEL(0)) |
@@ -6044,18 +6034,11 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                /* sync PFP to ME, otherwise we might get invalid PFP reads */
                amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
                amdgpu_ring_write(ring, 0x0);
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
-               amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
-               amdgpu_ring_write(ring, 0);
+               /* GFX8 emits 128 dw nop so the CE cannot access the VM
+                * before the vm_flush has finished.
+                */
+               amdgpu_ring_insert_nop(ring, 128);
        }
 }
 
-static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
-       return ring->adev->wb.wb[ring->rptr_offs];
-}
-
 static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 {
        return ring->adev->wb.wb[ring->wptr_offs];
@@ -6091,6 +6074,77 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, upper_32_bits(seq));
 }
 
+static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+       amdgpu_ring_write(ring, 0);
+}
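+
+/* SWITCH_BUFFER used to be open-coded in emit_ib_gfx and emit_pipeline_sync;
+ * routing it through the new .emit_switch_buffer hook presumably lets the
+ * common submission code place it exactly once per ring frame.
+ */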
+
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+       uint32_t dw2 = 0;
+
+       dw2 |= 0x80000000; /* set load_enable, otherwise this packet is just NOPs */
+       if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+               /* set load_global_config & load_global_uconfig */
+               dw2 |= 0x8001;
+               /* set load_cs_sh_regs */
+               dw2 |= 0x01000000;
+               /* set load_per_context_state & load_gfx_sh_regs for GFX */
+               dw2 |= 0x10002;
+
+               /* set load_ce_ram if a preamble is present */
+               if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
+                       dw2 |= 0x10000000;
+       } else {
+               /* still set load_ce_ram if this is the first time a preamble
+                * is presented, even though no context switch happens.
+                */
+               if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
+                       dw2 |= 0x10000000;
+       }
+
+       amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+       amdgpu_ring_write(ring, dw2);
+       amdgpu_ring_write(ring, 0);
+}
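+
+/* Illustrative dw2 composition: with AMDGPU_HAVE_CTX_SWITCH and
+ * AMDGPU_PREAMBLE_IB_PRESENT both set, the ORs above give
+ *   0x80000000 | 0x8001 | 0x01000000 | 0x10002 | 0x10000000 = 0x91018003,
+ * i.e. a single CONTEXT_CONTROL packet that reloads global, per-context
+ * and shader register state and restores CE RAM in one go.
+ */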
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v8_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v8_0_ring_emit_gds_switch */
+               7 + /* gfx_v8_0_ring_emit_hdp_flush */
+               5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+               6 + 6 + 6 + /* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+               7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+               256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+               2 + /* gfx_v8_ring_emit_sb */
+               3; /* gfx_v8_ring_emit_cntxcntl */
+}
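+
+/* The terms above sum to 337 dwords; the 256 covers the two 128 dw NOP
+ * paddings around the GFX vm_flush and presumably dominates the worst-case
+ * space the ring has to reserve per frame.
+ */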
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               4; /* gfx_v8_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+       return
+               20 + /* gfx_v8_0_ring_emit_gds_switch */
+               7 + /* gfx_v8_0_ring_emit_hdp_flush */
+               5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+               7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+               17 + /* gfx_v8_0_ring_emit_vm_flush */
+               7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
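+
+/* 77 dwords in total: the compute frame is far cheaper than the GFX one
+ * since it needs no CE NOP padding and no SWITCH_BUFFER packets.
+ */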
+
 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                                 enum amdgpu_interrupt_state state)
 {
@@ -6257,7 +6311,7 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
-       .get_rptr = gfx_v8_0_ring_get_rptr_gfx,
+       .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
@@ -6272,10 +6326,14 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .emit_switch_buffer = gfx_v8_ring_emit_sb,
+       .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
+       .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
+       .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
-       .get_rptr = gfx_v8_0_ring_get_rptr_compute,
+       .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .parse_cs = NULL,
@@ -6290,6 +6348,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
+       .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
+       .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
new file mode 100644 (file)
index 0000000..b13c8aa
--- /dev/null
@@ -0,0 +1,1071 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "gmc_v6_0.h"
+#include "amdgpu_ucode.h"
+#include "si/sid.h"
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v6_0_wait_for_idle(void *handle);
+
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+
+static const u32 crtc_offsets[6] =
+{
+       SI_CRTC0_REGISTER_OFFSET,
+       SI_CRTC1_REGISTER_OFFSET,
+       SI_CRTC2_REGISTER_OFFSET,
+       SI_CRTC3_REGISTER_OFFSET,
+       SI_CRTC4_REGISTER_OFFSET,
+       SI_CRTC5_REGISTER_OFFSET
+};
+
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
+                            struct amdgpu_mode_mc_save *save)
+{
+       u32 blackout;
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_stop_mc_access(adev, save);
+
+       gmc_v6_0_wait_for_idle((void *)adev);
+
+       blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+       if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+               /* Block CPU access */
+               WREG32(BIF_FB_EN, 0);
+               /* blackout the MC */
+               blackout = REG_SET_FIELD(blackout,
+                                        mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+               WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+       }
+       /* wait for the MC to settle */
+       udelay(100);
+}
+
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
+                              struct amdgpu_mode_mc_save *save)
+{
+       u32 tmp;
+
+       /* unblackout the MC */
+       tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+       tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+       WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+       /* allow CPU access */
+       tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
+       tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
+       WREG32(BIF_FB_EN, tmp);
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_resume_mc_access(adev, save);
+}
+
+static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+
+       DRM_DEBUG("\n");
+
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               chip_name = "tahiti";
+               break;
+       case CHIP_PITCAIRN:
+               chip_name = "pitcairn";
+               break;
+       case CHIP_VERDE:
+               chip_name = "verde";
+               break;
+       case CHIP_OLAND:
+               chip_name = "oland";
+               break;
+       case CHIP_HAINAN:
+               chip_name = "hainan";
+               break;
+       default:
+               BUG();
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+
+       err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+       if (err) {
+               dev_err(adev->dev,
+                      "si_mc: Failed to load firmware \"%s\"\n",
+                      fw_name);
+               release_firmware(adev->mc.fw);
+               adev->mc.fw = NULL;
+       }
+       return err;
+}
+
+static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+       const __le32 *new_fw_data = NULL;
+       u32 running;
+       const __le32 *new_io_mc_regs = NULL;
+       int i, regs_size, ucode_size;
+       const struct mc_firmware_header_v1_0 *hdr;
+
+       if (!adev->mc.fw)
+               return -EINVAL;
+
+       hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+
+       amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+       adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+       new_io_mc_regs = (const __le32 *)
+               (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+       new_fw_data = (const __le32 *)
+               (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+       running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+       if (running == 0) {
+               /* reset the engine and set to writable */
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+               /* load mc io regs */
+               for (i = 0; i < regs_size; i++) {
+                       WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+                       WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+               }
+               /* load the MC ucode */
+               for (i = 0; i < ucode_size; i++) {
+                       WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+               }
+
+               /* put the engine back into the active state */
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+               WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+               /* wait for training to complete */
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+                               break;
+                       udelay(1);
+               }
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+                               break;
+                       udelay(1);
+               }
+       }
+
+       return 0;
+}
+
+static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+                                      struct amdgpu_mc *mc)
+{
+       if (mc->mc_vram_size > 0xFFC0000000ULL) {
+               dev_warn(adev->dev, "limiting VRAM\n");
+               mc->real_vram_size = 0xFFC0000000ULL;
+               mc->mc_vram_size = 0xFFC0000000ULL;
+       }
+       amdgpu_vram_location(adev, &adev->mc, 0);
+       adev->mc.gtt_base_align = 0;
+       amdgpu_gtt_location(adev, mc);
+}
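+
+/* 0xFFC0000000ULL is 1023 GiB; given the 40-bit MC address space (see
+ * gmc_v6_0_sw_init) this presumably leaves headroom for the GTT aperture
+ * that amdgpu_gtt_location() places after VRAM.
+ */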
+
+static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_mc_save save;
+       u32 tmp;
+       int i, j;
+
+       /* Initialize HDP */
+       for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+               WREG32((0xb05 + j), 0x00000000);
+               WREG32((0xb06 + j), 0x00000000);
+               WREG32((0xb07 + j), 0x00000000);
+               WREG32((0xb08 + j), 0x00000000);
+               WREG32((0xb09 + j), 0x00000000);
+       }
+       WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+       gmc_v6_0_mc_stop(adev, &save);
+
+       if (gmc_v6_0_wait_for_idle((void *)adev))
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+
+       WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+       /* Update configuration */
+       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+              adev->mc.vram_start >> 12);
+       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+              adev->mc.vram_end >> 12);
+       WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+              adev->vram_scratch.gpu_addr >> 12);
+       tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+       tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+       WREG32(MC_VM_FB_LOCATION, tmp);
+       /* XXX double check these! */
+       WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+       WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+       WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+       WREG32(MC_VM_AGP_BASE, 0);
+       WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+       WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+
+       if (gmc_v6_0_wait_for_idle((void *)adev))
+               dev_warn(adev->dev, "Wait for MC idle timed out!\n");
+       gmc_v6_0_mc_resume(adev, &save);
+       amdgpu_display_set_vga_render_state(adev, false);
+}
+
+static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       int chansize, numchan;
+
+       tmp = RREG32(MC_ARB_RAMCFG);
+       if (tmp & CHANSIZE_OVERRIDE) {
+               chansize = 16;
+       } else if (tmp & CHANSIZE_MASK) {
+               chansize = 64;
+       } else {
+               chansize = 32;
+       }
+       tmp = RREG32(MC_SHARED_CHMAP);
+       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+       case 0:
+       default:
+               numchan = 1;
+               break;
+       case 1:
+               numchan = 2;
+               break;
+       case 2:
+               numchan = 4;
+               break;
+       case 3:
+               numchan = 8;
+               break;
+       case 4:
+               numchan = 3;
+               break;
+       case 5:
+               numchan = 6;
+               break;
+       case 6:
+               numchan = 10;
+               break;
+       case 7:
+               numchan = 12;
+               break;
+       case 8:
+               numchan = 16;
+               break;
+       }
+       adev->mc.vram_width = numchan * chansize;
+       /* Could the aperture size report 0? */
+       adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+       adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+       /* size in MB on si */
+       adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       adev->mc.visible_vram_size = adev->mc.aper_size;
+
+       /* unless the user has overridden it, set the GART size to
+        * 1024 MB or the VRAM size, whichever is larger.
+        */
+       if (amdgpu_gart_size == -1)
+               adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+       else
+               adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+       gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+
+       return 0;
+}
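+
+/* Example: a Tahiti board reporting 12 channels of 32 bits each ends up
+ * with adev->mc.vram_width = 384, matching its 384-bit GDDR5 interface.
+ */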
+
+static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+                                       uint32_t vmid)
+{
+       WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+       WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
+                                    void *cpu_pt_addr,
+                                    uint32_t gpu_page_idx,
+                                    uint64_t addr,
+                                    uint32_t flags)
+{
+       void __iomem *ptr = (void *)cpu_pt_addr;
+       uint64_t value;
+
+       value = addr & 0xFFFFFFFFFFFFF000ULL;
+       value |= flags;
+       writeq(value, ptr + (gpu_page_idx * 8));
+
+       return 0;
+}
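+
+/* Illustrative: for addr = 0x123456789aULL the mask above keeps the 4 KiB
+ * aligned page address 0x1234567000, leaving the low 12 bits free for the
+ * PTE flags (e.g. valid/system/snooped on SI).
+ */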
+
+static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
+                                             bool value)
+{
+       u32 tmp;
+
+       tmp = RREG32(VM_CONTEXT1_CNTL);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+                           xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       WREG32(VM_CONTEXT1_CNTL, tmp);
+}
+
+static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+{
+       int r, i;
+
+       if (adev->gart.robj == NULL) {
+               dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+               return -EINVAL;
+       }
+       r = amdgpu_gart_table_vram_pin(adev);
+       if (r)
+               return r;
+       /* Setup TLB control */
+       WREG32(MC_VM_MX_L1_TLB_CNTL,
+              (0xA << 7) |
+              ENABLE_L1_TLB |
+              ENABLE_L1_FRAGMENT_PROCESSING |
+              SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+              ENABLE_ADVANCED_DRIVER_MODEL |
+              SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+       /* Setup L2 cache */
+       WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+              ENABLE_L2_FRAGMENT_PROCESSING |
+              ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+              ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+              EFFECTIVE_L2_QUEUE_SIZE(7) |
+              CONTEXT1_IDENTITY_ACCESS_MODE(1));
+       WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+       WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+              BANK_SELECT(4) |
+              L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+       /* setup context0 */
+       WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+       WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+                       (u32)(adev->dummy_page.addr >> 12));
+       WREG32(VM_CONTEXT0_CNTL2, 0);
+       WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+                                 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+       WREG32(0x575, 0);
+       WREG32(0x576, 0);
+       WREG32(0x577, 0);
+
+       /* empty context1-15 */
+       /* set vm size, must be a multiple of 4MB */
+       WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+       /* Assign the pt base to something valid for now; the pts used for
+        * the VMs are determined by the application and setup and assigned
+        * on the fly in the vm part of radeon_gart.c
+        */
+       for (i = 1; i < 16; i++) {
+               if (i < 8)
+                       WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+                              adev->gart.table_addr >> 12);
+               else
+                       WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+                              adev->gart.table_addr >> 12);
+       }
+
+       /* enable context1-15 */
+       WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+              (u32)(adev->dummy_page.addr >> 12));
+       WREG32(VM_CONTEXT1_CNTL2, 4);
+       WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+                               PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
+                               RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+       gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+       dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+                (unsigned)(adev->mc.gtt_size >> 20),
+                (unsigned long long)adev->gart.table_addr);
+       adev->gart.ready = true;
+       return 0;
+}
+
+static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (adev->gart.robj) {
+               dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
+               return 0;
+       }
+       r = amdgpu_gart_init(adev);
+       if (r)
+               return r;
+       adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+       return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+{
+       /*unsigned i;
+
+       for (i = 1; i < 16; ++i) {
+               uint32_t reg;
+               if (i < 8)
+                       reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
+               else
+                       reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
+               adev->vm_manager.saved_table_addr[i] = RREG32(reg);
+       }*/
+
+       /* Disable all tables */
+       WREG32(VM_CONTEXT0_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL, 0);
+       /* Setup TLB control */
+       WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+              SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+       /* Setup L2 cache */
+       WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+              ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+              EFFECTIVE_L2_QUEUE_SIZE(7) |
+              CONTEXT1_IDENTITY_ACCESS_MODE(1));
+       WREG32(VM_L2_CNTL2, 0);
+       WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+              L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+       amdgpu_gart_table_vram_unpin(adev);
+}
+
+static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
+{
+       amdgpu_gart_table_vram_free(adev);
+       amdgpu_gart_fini(adev);
+}
+
+static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
+{
+       /*
+        * number of VMs
+        * VMID 0 is reserved for System
+        * amdgpu graphics/compute will use VMIDs 1-7
+        * amdkfd will use VMIDs 8-15
+        */
+       adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+       amdgpu_vm_manager_init(adev);
+
+       /* base offset of vram pages */
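+       /* MC_VM_FB_OFFSET is in 4 MiB (2^22 byte) units, hence the shift below */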
+       if (adev->flags & AMD_IS_APU) {
+               u64 tmp = RREG32(MC_VM_FB_OFFSET);
+               tmp <<= 22;
+               adev->vm_manager.vram_base_offset = tmp;
+       } else
+               adev->vm_manager.vram_base_offset = 0;
+
+       return 0;
+}
+
+static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
+                                    u32 status, u32 addr, u32 mc_client)
+{
+       u32 mc_id;
+       u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
+       u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                                       xxPROTECTIONS);
+       char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+               (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
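+       /* mc_client packs four ASCII characters naming the faulting block */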
+
+       mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                             xxMEMORY_CLIENT_ID);
+
+       dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+              protections, vmid, addr,
+              REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                            xxMEMORY_CLIENT_RW) ?
+              "write" : "read", block, mc_client, mc_id);
+}
+
+/*
+static const u32 mc_cg_registers[] = {
+       MC_HUB_MISC_HUB_CG,
+       MC_HUB_MISC_SIP_CG,
+       MC_HUB_MISC_VM_CG,
+       MC_XPB_CLK_GAT,
+       ATC_MISC_CG,
+       MC_CITF_MISC_WR_CG,
+       MC_CITF_MISC_RD_CG,
+       MC_CITF_MISC_VM_CG,
+       VM_L2_CG,
+};
+
+static const u32 mc_cg_ls_en[] = {
+       MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
+       MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
+       MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+       MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
+       ATC_MISC_CG__MEM_LS_ENABLE_MASK,
+       MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
+       MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
+       MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+       VM_L2_CG__MEM_LS_ENABLE_MASK,
+};
+
+static const u32 mc_cg_en[] = {
+       MC_HUB_MISC_HUB_CG__ENABLE_MASK,
+       MC_HUB_MISC_SIP_CG__ENABLE_MASK,
+       MC_HUB_MISC_VM_CG__ENABLE_MASK,
+       MC_XPB_CLK_GAT__ENABLE_MASK,
+       ATC_MISC_CG__ENABLE_MASK,
+       MC_CITF_MISC_WR_CG__ENABLE_MASK,
+       MC_CITF_MISC_RD_CG__ENABLE_MASK,
+       MC_CITF_MISC_VM_CG__ENABLE_MASK,
+       VM_L2_CG__ENABLE_MASK,
+};
+
+static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
+                                 bool enable)
+{
+       int i;
+       u32 orig, data;
+
+       for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+               orig = data = RREG32(mc_cg_registers[i]);
+               if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+                       data |= mc_cg_ls_en[i];
+               else
+                       data &= ~mc_cg_ls_en[i];
+               if (data != orig)
+                       WREG32(mc_cg_registers[i], data);
+       }
+}
+
+static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
+                                   bool enable)
+{
+       int i;
+       u32 orig, data;
+
+       for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+               orig = data = RREG32(mc_cg_registers[i]);
+               if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+                       data |= mc_cg_en[i];
+               else
+                       data &= ~mc_cg_en[i];
+               if (data != orig)
+                       WREG32(mc_cg_registers[i], data);
+       }
+}
+
+static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
+                                    bool enable)
+{
+       u32 orig, data;
+
+       orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+
+       if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
+       } else {
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
+               data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
+       }
+
+       if (orig != data)
+               WREG32_PCIE(ixPCIE_CNTL2, data);
+}
+
+static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
+                                    bool enable)
+{
+       u32 orig, data;
+
+       orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+       if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+               data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
+       else
+               data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
+
+       if (orig != data)
+               WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
+                                  bool enable)
+{
+       u32 orig, data;
+
+       orig = data = RREG32(HDP_MEM_POWER_LS);
+
+       if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+               data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
+       else
+               data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
+
+       if (orig != data)
+               WREG32(HDP_MEM_POWER_LS, data);
+}
+*/
+
+static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
+{
+       switch (mc_seq_vram_type) {
+       case MC_SEQ_MISC0__MT__GDDR1:
+               return AMDGPU_VRAM_TYPE_GDDR1;
+       case MC_SEQ_MISC0__MT__DDR2:
+               return AMDGPU_VRAM_TYPE_DDR2;
+       case MC_SEQ_MISC0__MT__GDDR3:
+               return AMDGPU_VRAM_TYPE_GDDR3;
+       case MC_SEQ_MISC0__MT__GDDR4:
+               return AMDGPU_VRAM_TYPE_GDDR4;
+       case MC_SEQ_MISC0__MT__GDDR5:
+               return AMDGPU_VRAM_TYPE_GDDR5;
+       case MC_SEQ_MISC0__MT__DDR3:
+               return AMDGPU_VRAM_TYPE_DDR3;
+       default:
+               return AMDGPU_VRAM_TYPE_UNKNOWN;
+       }
+}
+
+static int gmc_v6_0_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gmc_v6_0_set_gart_funcs(adev);
+       gmc_v6_0_set_irq_funcs(adev);
+
+       if (adev->flags & AMD_IS_APU) {
+               adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+       } else {
+               u32 tmp = RREG32(MC_SEQ_MISC0);
+               tmp &= MC_SEQ_MISC0__MT__MASK;
+               adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+       }
+
+       return 0;
+}
+
+static int gmc_v6_0_late_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
+static int gmc_v6_0_sw_init(void *handle)
+{
+       int r;
+       int dma_bits;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+       if (r)
+               return r;
+
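+       /* amdgpu_vm_size is in GiB; 1 GiB is 2^18 pages of 4 KiB, hence << 18 */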
+       adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+
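+       /* SI exposes a 40-bit physical MC address space */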
+       adev->mc.mc_mask = 0xffffffffffULL;
+
+       adev->need_dma32 = false;
+       dma_bits = adev->need_dma32 ? 32 : 40;
+       r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               adev->need_dma32 = true;
+               dma_bits = 32;
+               dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+       }
+       r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+       if (r) {
+               pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+               dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
+       }
+
+       r = gmc_v6_0_init_microcode(adev);
+       if (r) {
+               dev_err(adev->dev, "Failed to load mc firmware!\n");
+               return r;
+       }
+
+       r = amdgpu_ttm_global_init(adev);
+       if (r)
+               return r;
+
+       r = gmc_v6_0_mc_init(adev);
+       if (r)
+               return r;
+
+       r = amdgpu_bo_init(adev);
+       if (r)
+               return r;
+
+       r = gmc_v6_0_gart_init(adev);
+       if (r)
+               return r;
+
+       if (!adev->vm_manager.enabled) {
+               r = gmc_v6_0_vm_init(adev);
+               if (r) {
+                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+                       return r;
+               }
+               adev->vm_manager.enabled = true;
+       }
+
+       return r;
+}
+
+static int gmc_v6_0_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->vm_manager.enabled) {
+               gmc_v6_0_vm_fini(adev);
+               adev->vm_manager.enabled = false;
+       }
+       gmc_v6_0_gart_fini(adev);
+       amdgpu_gem_force_release(adev);
+       amdgpu_bo_fini(adev);
+
+       return 0;
+}
+
+static int gmc_v6_0_hw_init(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       gmc_v6_0_mc_program(adev);
+
+       if (!(adev->flags & AMD_IS_APU)) {
+               r = gmc_v6_0_mc_load_microcode(adev);
+               if (r) {
+                       dev_err(adev->dev, "Failed to load MC firmware!\n");
+                       return r;
+               }
+       }
+
+       r = gmc_v6_0_gart_enable(adev);
+       if (r)
+               return r;
+
+       return r;
+}
+
+static int gmc_v6_0_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+       gmc_v6_0_gart_disable(adev);
+
+       return 0;
+}
+
+static int gmc_v6_0_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->vm_manager.enabled) {
+               gmc_v6_0_vm_fini(adev);
+               adev->vm_manager.enabled = false;
+       }
+       gmc_v6_0_hw_fini(adev);
+
+       return 0;
+}
+
+static int gmc_v6_0_resume(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = gmc_v6_0_hw_init(adev);
+       if (r)
+               return r;
+
+       if (!adev->vm_manager.enabled) {
+               r = gmc_v6_0_vm_init(adev);
+               if (r) {
+                       dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+                       return r;
+               }
+               adev->vm_manager.enabled = true;
+       }
+
+       return r;
+}
+
+static bool gmc_v6_0_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                  SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+               return false;
+
+       return true;
+}
+
+static int gmc_v6_0_wait_for_idle(void *handle)
+{
+       unsigned i;
+       u32 tmp;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+                                              SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                                              SRBM_STATUS__MCC_BUSY_MASK |
+                                              SRBM_STATUS__MCD_BUSY_MASK |
+                                              SRBM_STATUS__VMC_BUSY_MASK);
+               if (!tmp)
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int gmc_v6_0_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct amdgpu_mode_mc_save save;
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+               srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                               mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+
+       if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+                  SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+               if (!(adev->flags & AMD_IS_APU))
+                       srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+                                                       mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+       }
+
+       if (srbm_soft_reset) {
+               gmc_v6_0_mc_stop(adev, &save);
+               if (gmc_v6_0_wait_for_idle(adev))
+                       dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+
+               tmp = RREG32(SRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+
+               gmc_v6_0_mc_resume(adev, &save);
+               udelay(50);
+       }
+
+       return 0;
+}
+
+static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+                                            struct amdgpu_irq_src *src,
+                                            unsigned type,
+                                            enum amdgpu_interrupt_state state)
+{
+       u32 tmp;
+       u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+                   VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               tmp = RREG32(VM_CONTEXT0_CNTL);
+               tmp &= ~bits;
+               WREG32(VM_CONTEXT0_CNTL, tmp);
+               tmp = RREG32(VM_CONTEXT1_CNTL);
+               tmp &= ~bits;
+               WREG32(VM_CONTEXT1_CNTL, tmp);
+               break;
+       case AMDGPU_IRQ_STATE_ENABLE:
+               tmp = RREG32(VM_CONTEXT0_CNTL);
+               tmp |= bits;
+               WREG32(VM_CONTEXT0_CNTL, tmp);
+               tmp = RREG32(VM_CONTEXT1_CNTL);
+               tmp |= bits;
+               WREG32(VM_CONTEXT1_CNTL, tmp);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       u32 addr, status;
+
+       addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+       status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
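+       /* clear the latched fault address and status so the next fault is captured */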
+       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+               gmc_v6_0_set_fault_enable_default(adev, false);
+
+       dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+               entry->src_id, entry->src_data);
+       dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+               addr);
+       dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+               status);
+       gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+
+       return 0;
+}
+
+static int gmc_v6_0_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int gmc_v6_0_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+       .name = "gmc_v6_0",
+       .early_init = gmc_v6_0_early_init,
+       .late_init = gmc_v6_0_late_init,
+       .sw_init = gmc_v6_0_sw_init,
+       .sw_fini = gmc_v6_0_sw_fini,
+       .hw_init = gmc_v6_0_hw_init,
+       .hw_fini = gmc_v6_0_hw_fini,
+       .suspend = gmc_v6_0_suspend,
+       .resume = gmc_v6_0_resume,
+       .is_idle = gmc_v6_0_is_idle,
+       .wait_for_idle = gmc_v6_0_wait_for_idle,
+       .soft_reset = gmc_v6_0_soft_reset,
+       .set_clockgating_state = gmc_v6_0_set_clockgating_state,
+       .set_powergating_state = gmc_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+       .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
+       .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+       .set = gmc_v6_0_vm_fault_interrupt_state,
+       .process = gmc_v6_0_process_interrupt,
+};
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+       if (adev->gart.gart_funcs == NULL)
+               adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+}
+
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->mc.vm_fault.num_types = 1;
+       adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
new file mode 100644 (file)
index 0000000..42c4fc6
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GMC_V6_0_H__
+#define __GMC_V6_0_H__
+
+extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+
+#endif
index 2118399..ef7c27d 100644 (file)
@@ -121,7 +121,7 @@ out:
        return result;
 }
 
-void iceland_start_smc(struct amdgpu_device *adev)
+static void iceland_start_smc(struct amdgpu_device *adev)
 {
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
 
@@ -129,7 +129,7 @@ void iceland_start_smc(struct amdgpu_device *adev)
        WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
 }
 
-void iceland_reset_smc(struct amdgpu_device *adev)
+static void iceland_reset_smc(struct amdgpu_device *adev)
 {
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
 
@@ -145,7 +145,7 @@ static int iceland_program_jump_on_start(struct amdgpu_device *adev)
        return 0;
 }
 
-void iceland_stop_smc_clock(struct amdgpu_device *adev)
+static void iceland_stop_smc_clock(struct amdgpu_device *adev)
 {
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
 
@@ -153,7 +153,7 @@ void iceland_stop_smc_clock(struct amdgpu_device *adev)
        WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
 }
 
-void iceland_start_smc_clock(struct amdgpu_device *adev)
+static void iceland_start_smc_clock(struct amdgpu_device *adev)
 {
        uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
new file mode 100644 (file)
index 0000000..055321f
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __R600_DPM_H__
+#define __R600_DPM_H__
+
+#define R600_ASI_DFLT                                10000
+#define R600_BSP_DFLT                                0x41EB
+#define R600_BSU_DFLT                                0x2
+#define R600_AH_DFLT                                 5
+#define R600_RLP_DFLT                                25
+#define R600_RMP_DFLT                                65
+#define R600_LHP_DFLT                                40
+#define R600_LMP_DFLT                                15
+#define R600_TD_DFLT                                 0
+#define R600_UTC_DFLT_00                             0x24
+#define R600_UTC_DFLT_01                             0x22
+#define R600_UTC_DFLT_02                             0x22
+#define R600_UTC_DFLT_03                             0x22
+#define R600_UTC_DFLT_04                             0x22
+#define R600_UTC_DFLT_05                             0x22
+#define R600_UTC_DFLT_06                             0x22
+#define R600_UTC_DFLT_07                             0x22
+#define R600_UTC_DFLT_08                             0x22
+#define R600_UTC_DFLT_09                             0x22
+#define R600_UTC_DFLT_10                             0x22
+#define R600_UTC_DFLT_11                             0x22
+#define R600_UTC_DFLT_12                             0x22
+#define R600_UTC_DFLT_13                             0x22
+#define R600_UTC_DFLT_14                             0x22
+#define R600_DTC_DFLT_00                             0x24
+#define R600_DTC_DFLT_01                             0x22
+#define R600_DTC_DFLT_02                             0x22
+#define R600_DTC_DFLT_03                             0x22
+#define R600_DTC_DFLT_04                             0x22
+#define R600_DTC_DFLT_05                             0x22
+#define R600_DTC_DFLT_06                             0x22
+#define R600_DTC_DFLT_07                             0x22
+#define R600_DTC_DFLT_08                             0x22
+#define R600_DTC_DFLT_09                             0x22
+#define R600_DTC_DFLT_10                             0x22
+#define R600_DTC_DFLT_11                             0x22
+#define R600_DTC_DFLT_12                             0x22
+#define R600_DTC_DFLT_13                             0x22
+#define R600_DTC_DFLT_14                             0x22
+#define R600_VRC_DFLT                                0x0000C003
+#define R600_VOLTAGERESPONSETIME_DFLT                1000
+#define R600_BACKBIASRESPONSETIME_DFLT               1000
+#define R600_VRU_DFLT                                0x3
+#define R600_SPLLSTEPTIME_DFLT                       0x1000
+#define R600_SPLLSTEPUNIT_DFLT                       0x3
+#define R600_TPU_DFLT                                0
+#define R600_TPC_DFLT                                0x200
+#define R600_SSTU_DFLT                               0
+#define R600_SST_DFLT                                0x00C8
+#define R600_GICST_DFLT                              0x200
+#define R600_FCT_DFLT                                0x0400
+#define R600_FCTU_DFLT                               0
+#define R600_CTXCGTT3DRPHC_DFLT                      0x20
+#define R600_CTXCGTT3DRSDC_DFLT                      0x40
+#define R600_VDDC3DOORPHC_DFLT                       0x100
+#define R600_VDDC3DOORSDC_DFLT                       0x7
+#define R600_VDDC3DOORSU_DFLT                        0
+#define R600_MPLLLOCKTIME_DFLT                       100
+#define R600_MPLLRESETTIME_DFLT                      150
+#define R600_VCOSTEPPCT_DFLT                          20
+#define R600_ENDINGVCOSTEPPCT_DFLT                    5
+#define R600_REFERENCEDIVIDER_DFLT                    4
+
+#define R600_PM_NUMBER_OF_TC 15
+#define R600_PM_NUMBER_OF_SCLKS 20
+#define R600_PM_NUMBER_OF_MCLKS 4
+#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum r600_power_level {
+       R600_POWER_LEVEL_LOW = 0,
+       R600_POWER_LEVEL_MEDIUM = 1,
+       R600_POWER_LEVEL_HIGH = 2,
+       R600_POWER_LEVEL_CTXSW = 3,
+};
+
+enum r600_td {
+       R600_TD_AUTO,
+       R600_TD_UP,
+       R600_TD_DOWN,
+};
+
+enum r600_display_watermark {
+       R600_DISPLAY_WATERMARK_LOW = 0,
+       R600_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum r600_display_gap {
+       R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+       R600_PM_DISPLAY_GAP_VBLANK = 1,
+       R600_PM_DISPLAY_GAP_WATERMARK = 2,
+       R600_PM_DISPLAY_GAP_IGNORE = 3,
+};
+#endif
index e822296..9ae3075 100644 (file)
@@ -190,12 +190,8 @@ out:
  */
 static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 {
-       u32 rptr;
-
        /* XXX check if swapping is necessary on BE */
-       rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-       return rptr;
+       return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -906,6 +902,22 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 6; /* sdma_v2_4_ring_emit_ib */
+}
+
+static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* sdma_v2_4_ring_emit_hdp_flush */
+               3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+               6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+               12 + /* sdma_v2_4_ring_emit_vm_flush */
+               10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+}
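+
+/* 6 + 3 + 6 + 12 + 30 = 57 dwords of worst-case frame overhead, presumably
+ * reserved up front by the common ring code via .get_dma_frame_size.
+ */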
+
 static int sdma_v2_4_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1224,6 +1236,8 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
        .test_ib = sdma_v2_4_ring_test_ib,
        .insert_nop = sdma_v2_4_ring_insert_nop,
        .pad_ib = sdma_v2_4_ring_pad_ib,
+       .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
+       .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
index bee4978..f325fd8 100644 (file)
@@ -335,12 +335,8 @@ out:
  */
 static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
-       u32 rptr;
-
        /* XXX check if swapping is necessary on BE */
-       rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
-       return rptr;
+       return ring->adev->wb.wb[ring->rptr_offs] >> 2;
 }
 
 /**
@@ -499,31 +495,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
 }
 
-unsigned init_cond_exec(struct amdgpu_ring *ring)
-{
-       unsigned ret;
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
-       amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
-       amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
-       amdgpu_ring_write(ring, 1);
-       ret = ring->wptr;/* this is the offset we need patch later */
-       amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
-       return ret;
-}
-
-void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
-       unsigned cur;
-       BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
-       cur = ring->wptr - 1;
-       if (likely(cur > offset))
-               ring->ring[offset] = cur - offset;
-       else
-               ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
-
 /**
  * sdma_v3_0_gfx_stop - stop the gfx async dma engines
  *
@@ -1133,6 +1104,22 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
 
+static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 6; /* sdma_v3_0_ring_emit_ib */
+}
+
+static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* sdma_v3_0_ring_emit_hdp_flush */
+               3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+               6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+               12 + /* sdma_v3_0_ring_emit_vm_flush */
+               10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
 static int sdma_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1594,6 +1581,8 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
        .test_ib = sdma_v3_0_ring_test_ib,
        .insert_nop = sdma_v3_0_ring_insert_nop,
        .pad_ib = sdma_v3_0_ring_pad_ib,
+       .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
new file mode 100644 (file)
index 0000000..fee76b8
--- /dev/null
@@ -0,0 +1,1965 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "atom.h"
+#include "amdgpu_powerplay.h"
+#include "si/sid.h"
+#include "si_ih.h"
+#include "gfx_v6_0.h"
+#include "gmc_v6_0.h"
+#include "si_dma.h"
+#include "dce_v6_0.h"
+#include "si.h"
+
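+/* "Golden" register tables: {offset, and_mask, value} triples that
+ * si_init_golden_registers() below hands to
+ * amdgpu_program_register_sequence() to apply per-ASIC defaults.
+ */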
+static const u32 tahiti_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x09df, 0x00000003, 0x000007ff,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x2a00126a,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x23a2, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x00000200, 0x000002fb,
+       0x2b04, 0xffffffff, 0x0000543b,
+       0x2b03, 0xffffffff, 0xa9210876,
+       0x2234, 0xffffffff, 0x000fff40,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0504, 0x20000000, 0x20fffed8,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+       0x0319, 0x00000001, 0x00000001
+};
+
+static const u32 tahiti_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x00601005,
+       0x311f, 0xffffffff, 0x10104040,
+       0x3122, 0xffffffff, 0x0100000a,
+       0x30c5, 0xffffffff, 0x00000800,
+       0x30c3, 0xffffffff, 0x800000f4,
+       0x3d2a, 0xffffffff, 0x00000000
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x2a00126a,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x000003ff, 0x000000f7,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x32761054,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x00601004,
+       0x311f, 0xffffffff, 0x10102020,
+       0x3122, 0xffffffff, 0x01000020,
+       0x30c5, 0xffffffff, 0x00000800,
+       0x30c3, 0xffffffff, 0x800000a4
+};
+
+static const u32 verde_pg_init[] =
+{
+       0xd4f, 0xffffffff, 0x40000,
+       0xd4e, 0xffffffff, 0x200010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x7007,
+       0xd4e, 0xffffffff, 0x300010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x400000,
+       0xd4e, 0xffffffff, 0x100010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x120200,
+       0xd4e, 0xffffffff, 0x500010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x1e1e16,
+       0xd4e, 0xffffffff, 0x600010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x171f1e,
+       0xd4e, 0xffffffff, 0x700010ff,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4f, 0xffffffff, 0x0,
+       0xd4e, 0xffffffff, 0x9ff,
+       0xd40, 0xffffffff, 0x0,
+       0xd41, 0xffffffff, 0x10000800,
+       0xd41, 0xffffffff, 0xf,
+       0xd41, 0xffffffff, 0xf,
+       0xd40, 0xffffffff, 0x4,
+       0xd41, 0xffffffff, 0x1000051e,
+       0xd41, 0xffffffff, 0xffff,
+       0xd41, 0xffffffff, 0xffff,
+       0xd40, 0xffffffff, 0x8,
+       0xd41, 0xffffffff, 0x80500,
+       0xd40, 0xffffffff, 0x12,
+       0xd41, 0xffffffff, 0x9050c,
+       0xd40, 0xffffffff, 0x1d,
+       0xd41, 0xffffffff, 0xb052c,
+       0xd40, 0xffffffff, 0x2a,
+       0xd41, 0xffffffff, 0x1053e,
+       0xd40, 0xffffffff, 0x2d,
+       0xd41, 0xffffffff, 0x10546,
+       0xd40, 0xffffffff, 0x30,
+       0xd41, 0xffffffff, 0xa054e,
+       0xd40, 0xffffffff, 0x3c,
+       0xd41, 0xffffffff, 0x1055f,
+       0xd40, 0xffffffff, 0x3f,
+       0xd41, 0xffffffff, 0x10567,
+       0xd40, 0xffffffff, 0x42,
+       0xd41, 0xffffffff, 0x1056f,
+       0xd40, 0xffffffff, 0x45,
+       0xd41, 0xffffffff, 0x10572,
+       0xd40, 0xffffffff, 0x48,
+       0xd41, 0xffffffff, 0x20575,
+       0xd40, 0xffffffff, 0x4c,
+       0xd41, 0xffffffff, 0x190801,
+       0xd40, 0xffffffff, 0x67,
+       0xd41, 0xffffffff, 0x1082a,
+       0xd40, 0xffffffff, 0x6a,
+       0xd41, 0xffffffff, 0x1b082d,
+       0xd40, 0xffffffff, 0x87,
+       0xd41, 0xffffffff, 0x310851,
+       0xd40, 0xffffffff, 0xba,
+       0xd41, 0xffffffff, 0x891,
+       0xd40, 0xffffffff, 0xbc,
+       0xd41, 0xffffffff, 0x893,
+       0xd40, 0xffffffff, 0xbe,
+       0xd41, 0xffffffff, 0x20895,
+       0xd40, 0xffffffff, 0xc2,
+       0xd41, 0xffffffff, 0x20899,
+       0xd40, 0xffffffff, 0xc6,
+       0xd41, 0xffffffff, 0x2089d,
+       0xd40, 0xffffffff, 0xca,
+       0xd41, 0xffffffff, 0x8a1,
+       0xd40, 0xffffffff, 0xcc,
+       0xd41, 0xffffffff, 0x8a3,
+       0xd40, 0xffffffff, 0xce,
+       0xd41, 0xffffffff, 0x308a5,
+       0xd40, 0xffffffff, 0xd3,
+       0xd41, 0xffffffff, 0x6d08cd,
+       0xd40, 0xffffffff, 0x142,
+       0xd41, 0xffffffff, 0x2000095a,
+       0xd41, 0xffffffff, 0x1,
+       0xd40, 0xffffffff, 0x144,
+       0xd41, 0xffffffff, 0x301f095b,
+       0xd40, 0xffffffff, 0x165,
+       0xd41, 0xffffffff, 0xc094d,
+       0xd40, 0xffffffff, 0x173,
+       0xd41, 0xffffffff, 0xf096d,
+       0xd40, 0xffffffff, 0x184,
+       0xd41, 0xffffffff, 0x15097f,
+       0xd40, 0xffffffff, 0x19b,
+       0xd41, 0xffffffff, 0xc0998,
+       0xd40, 0xffffffff, 0x1a9,
+       0xd41, 0xffffffff, 0x409a7,
+       0xd40, 0xffffffff, 0x1af,
+       0xd41, 0xffffffff, 0xcdc,
+       0xd40, 0xffffffff, 0x1b1,
+       0xd41, 0xffffffff, 0x800,
+       0xd42, 0xffffffff, 0x6c9b2000,
+       0xd44, 0xfc00, 0x2000,
+       0xd51, 0xffffffff, 0xfc0,
+       0xa35, 0x00000100, 0x100
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x033f1005,
+       0x311f, 0xffffffff, 0x10808020,
+       0x3122, 0xffffffff, 0x00800008,
+       0x30c5, 0xffffffff, 0x00001000,
+       0x30c3, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x0903, 0x000007ff, 0x00000000,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x2285, 0xf000001f, 0x00000007,
+       0x2285, 0xf000001f, 0x00000007,
+       0x2285, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x0000124a,
+       0xa0d4, 0x3f3f3fff, 0x0000124a,
+       0xa0d4, 0x3f3f3fff, 0x0000124a,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x23a2, 0x01ff1f3f, 0x00000000,
+       0x23a3, 0x01ff1f3f, 0x00000000,
+       0x23a2, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x23a1, 0x01ff1f3f, 0x00000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b01, 0x000003ff, 0x00000003,
+       0x2b05, 0x000003ff, 0x00000003,
+       0x2b05, 0x000003ff, 0x00000003,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x00001032,
+       0x2b03, 0xffffffff, 0x00001032,
+       0x2b03, 0xffffffff, 0x00001032,
+       0x2235, 0x0000001f, 0x00000010,
+       0x2235, 0x0000001f, 0x00000010,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x031e, 0x00000080, 0x00000000,
+       0x340c, 0x000300c0, 0x00800040,
+       0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f9, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa293, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x00000082,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x07ffffff, 0x03000000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x000003ff, 0x000000f3,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x00003210,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+       0x3109, 0xffffffff, 0x00601005,
+       0x311f, 0xffffffff, 0x10104040,
+       0x3122, 0xffffffff, 0x0100000a,
+       0x30c5, 0xffffffff, 0x00000800,
+       0x30c3, 0xffffffff, 0x800000f4
+};
+
+static const u32 hainan_golden_registers[] =
+{
+       0x2684, 0x00010000, 0x00018208,
+       0x260c, 0xffffffff, 0x00000000,
+       0x260d, 0xf00fffff, 0x00000400,
+       0x260e, 0x0002021c, 0x00020200,
+       0x4595, 0xff000fff, 0x00000100,
+       0x340c, 0x000300c0, 0x00800040,
+       0x3630, 0xff000fff, 0x00000100,
+       0x360c, 0x000300c0, 0x00800040,
+       0x0ab9, 0x00073ffe, 0x000022a2,
+       0x0903, 0x000007ff, 0x00000000,
+       0x2285, 0xf000001f, 0x00000007,
+       0x22c9, 0xffffffff, 0x00ffffff,
+       0x22c4, 0x0000ff0f, 0x00000000,
+       0xa393, 0x07ffffff, 0x4e000000,
+       0xa0d4, 0x3f3f3fff, 0x00000000,
+       0x000c, 0x000000ff, 0x0040,
+       0x000d, 0x00000040, 0x00004040,
+       0x2440, 0x03e00000, 0x03600000,
+       0x2418, 0x0000007f, 0x00000020,
+       0x2542, 0x00010000, 0x00010000,
+       0x2b05, 0x000003ff, 0x000000f1,
+       0x2b04, 0xffffffff, 0x00000000,
+       0x2b03, 0xffffffff, 0x00003210,
+       0x2235, 0x0000001f, 0x00000010,
+       0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+       0x263e, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2471, 0xffffffff, 0x00060005,
+       0x2472, 0xffffffff, 0x00080007,
+       0x2473, 0xffffffff, 0x0000000b,
+       0x2474, 0xffffffff, 0x000a0009,
+       0x2475, 0xffffffff, 0x000d000c,
+       0x2476, 0xffffffff, 0x00070006,
+       0x2477, 0xffffffff, 0x00090008,
+       0x2478, 0xffffffff, 0x0000000c,
+       0x2479, 0xffffffff, 0x000b000a,
+       0x247a, 0xffffffff, 0x000e000d,
+       0x247b, 0xffffffff, 0x00080007,
+       0x247c, 0xffffffff, 0x000a0009,
+       0x247d, 0xffffffff, 0x0000000d,
+       0x247e, 0xffffffff, 0x000c000b,
+       0x247f, 0xffffffff, 0x000f000e,
+       0x2480, 0xffffffff, 0x00090008,
+       0x2481, 0xffffffff, 0x000b000a,
+       0x2482, 0xffffffff, 0x000c000f,
+       0x2483, 0xffffffff, 0x000e000d,
+       0x2484, 0xffffffff, 0x00110010,
+       0x2485, 0xffffffff, 0x000a0009,
+       0x2486, 0xffffffff, 0x000c000b,
+       0x2487, 0xffffffff, 0x0000000f,
+       0x2488, 0xffffffff, 0x000e000d,
+       0x2489, 0xffffffff, 0x00110010,
+       0x248a, 0xffffffff, 0x000b000a,
+       0x248b, 0xffffffff, 0x000d000c,
+       0x248c, 0xffffffff, 0x00000010,
+       0x248d, 0xffffffff, 0x000f000e,
+       0x248e, 0xffffffff, 0x00120011,
+       0x248f, 0xffffffff, 0x000c000b,
+       0x2490, 0xffffffff, 0x000e000d,
+       0x2491, 0xffffffff, 0x00000011,
+       0x2492, 0xffffffff, 0x0010000f,
+       0x2493, 0xffffffff, 0x00130012,
+       0x2494, 0xffffffff, 0x000d000c,
+       0x2495, 0xffffffff, 0x000f000e,
+       0x2496, 0xffffffff, 0x00100013,
+       0x2497, 0xffffffff, 0x00120011,
+       0x2498, 0xffffffff, 0x00150014,
+       0x2499, 0xffffffff, 0x000e000d,
+       0x249a, 0xffffffff, 0x0010000f,
+       0x249b, 0xffffffff, 0x00000013,
+       0x249c, 0xffffffff, 0x00120011,
+       0x249d, 0xffffffff, 0x00150014,
+       0x249e, 0xffffffff, 0x000f000e,
+       0x249f, 0xffffffff, 0x00110010,
+       0x24a0, 0xffffffff, 0x00000014,
+       0x24a1, 0xffffffff, 0x00130012,
+       0x24a2, 0xffffffff, 0x00160015,
+       0x24a3, 0xffffffff, 0x0010000f,
+       0x24a4, 0xffffffff, 0x00120011,
+       0x24a5, 0xffffffff, 0x00000015,
+       0x24a6, 0xffffffff, 0x00140013,
+       0x24a7, 0xffffffff, 0x00170016,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2480, 0xffffffff, 0x00090008,
+       0x2481, 0xffffffff, 0x000b000a,
+       0x2482, 0xffffffff, 0x000c000f,
+       0x2483, 0xffffffff, 0x000e000d,
+       0x2484, 0xffffffff, 0x00110010,
+       0x2485, 0xffffffff, 0x000a0009,
+       0x2486, 0xffffffff, 0x000c000b,
+       0x2487, 0xffffffff, 0x0000000f,
+       0x2488, 0xffffffff, 0x000e000d,
+       0x2489, 0xffffffff, 0x00110010,
+       0x248a, 0xffffffff, 0x000b000a,
+       0x248b, 0xffffffff, 0x000d000c,
+       0x248c, 0xffffffff, 0x00000010,
+       0x248d, 0xffffffff, 0x000f000e,
+       0x248e, 0xffffffff, 0x00120011,
+       0x248f, 0xffffffff, 0x000c000b,
+       0x2490, 0xffffffff, 0x000e000d,
+       0x2491, 0xffffffff, 0x00000011,
+       0x2492, 0xffffffff, 0x0010000f,
+       0x2493, 0xffffffff, 0x00130012,
+       0x2494, 0xffffffff, 0x000d000c,
+       0x2495, 0xffffffff, 0x000f000e,
+       0x2496, 0xffffffff, 0x00100013,
+       0x2497, 0xffffffff, 0x00120011,
+       0x2498, 0xffffffff, 0x00150014,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2480, 0xffffffff, 0x00090008,
+       0x2481, 0xffffffff, 0x000b000a,
+       0x2482, 0xffffffff, 0x000c000f,
+       0x2483, 0xffffffff, 0x000e000d,
+       0x2484, 0xffffffff, 0x00110010,
+       0x2485, 0xffffffff, 0x000a0009,
+       0x2486, 0xffffffff, 0x000c000b,
+       0x2487, 0xffffffff, 0x0000000f,
+       0x2488, 0xffffffff, 0x000e000d,
+       0x2489, 0xffffffff, 0x00110010,
+       0x248a, 0xffffffff, 0x000b000a,
+       0x248b, 0xffffffff, 0x000d000c,
+       0x248c, 0xffffffff, 0x00000010,
+       0x248d, 0xffffffff, 0x000f000e,
+       0x248e, 0xffffffff, 0x00120011,
+       0x248f, 0xffffffff, 0x000c000b,
+       0x2490, 0xffffffff, 0x000e000d,
+       0x2491, 0xffffffff, 0x00000011,
+       0x2492, 0xffffffff, 0x0010000f,
+       0x2493, 0xffffffff, 0x00130012,
+       0x2494, 0xffffffff, 0x000d000c,
+       0x2495, 0xffffffff, 0x000f000e,
+       0x2496, 0xffffffff, 0x00100013,
+       0x2497, 0xffffffff, 0x00120011,
+       0x2498, 0xffffffff, 0x00150014,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2471, 0xffffffff, 0x00060005,
+       0x2472, 0xffffffff, 0x00080007,
+       0x2473, 0xffffffff, 0x0000000b,
+       0x2474, 0xffffffff, 0x000a0009,
+       0x2475, 0xffffffff, 0x000d000c,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x40b, 0x00000101, 0x00000000,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0x1579, 0xff000fff, 0x00000100,
+       0x157a, 0x00000001, 0x00000001,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+       0x3100, 0xffffffff, 0xfffffffc,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2698, 0xffffffff, 0x00000100,
+       0x24a9, 0xffffffff, 0x00000100,
+       0x3059, 0xffffffff, 0x00000100,
+       0x25dd, 0xffffffff, 0x00000100,
+       0x2261, 0xffffffff, 0x06000100,
+       0x2286, 0xffffffff, 0x00000100,
+       0x24a8, 0xffffffff, 0x00000100,
+       0x30e0, 0xffffffff, 0x00000100,
+       0x22ca, 0xffffffff, 0x00000100,
+       0x2451, 0xffffffff, 0x00000100,
+       0x2362, 0xffffffff, 0x00000100,
+       0x2363, 0xffffffff, 0x00000100,
+       0x240c, 0xffffffff, 0x00000100,
+       0x240d, 0xffffffff, 0x00000100,
+       0x240e, 0xffffffff, 0x00000100,
+       0x240f, 0xffffffff, 0x00000100,
+       0x2b60, 0xffffffff, 0x00000100,
+       0x2b15, 0xffffffff, 0x00000100,
+       0x225f, 0xffffffff, 0x06000100,
+       0x261a, 0xffffffff, 0x00000100,
+       0x2544, 0xffffffff, 0x00000100,
+       0x2bc1, 0xffffffff, 0x00000100,
+       0x2b81, 0xffffffff, 0x00000100,
+       0x2527, 0xffffffff, 0x00000100,
+       0x200b, 0xffffffff, 0xe0000000,
+       0x2458, 0xffffffff, 0x00010000,
+       0x2459, 0xffffffff, 0x00030002,
+       0x245a, 0xffffffff, 0x00040007,
+       0x245b, 0xffffffff, 0x00060005,
+       0x245c, 0xffffffff, 0x00090008,
+       0x245d, 0xffffffff, 0x00020001,
+       0x245e, 0xffffffff, 0x00040003,
+       0x245f, 0xffffffff, 0x00000007,
+       0x2460, 0xffffffff, 0x00060005,
+       0x2461, 0xffffffff, 0x00090008,
+       0x2462, 0xffffffff, 0x00030002,
+       0x2463, 0xffffffff, 0x00050004,
+       0x2464, 0xffffffff, 0x00000008,
+       0x2465, 0xffffffff, 0x00070006,
+       0x2466, 0xffffffff, 0x000a0009,
+       0x2467, 0xffffffff, 0x00040003,
+       0x2468, 0xffffffff, 0x00060005,
+       0x2469, 0xffffffff, 0x00000009,
+       0x246a, 0xffffffff, 0x00080007,
+       0x246b, 0xffffffff, 0x000b000a,
+       0x246c, 0xffffffff, 0x00050004,
+       0x246d, 0xffffffff, 0x00070006,
+       0x246e, 0xffffffff, 0x0008000b,
+       0x246f, 0xffffffff, 0x000a0009,
+       0x2470, 0xffffffff, 0x000d000c,
+       0x2471, 0xffffffff, 0x00060005,
+       0x2472, 0xffffffff, 0x00080007,
+       0x2473, 0xffffffff, 0x0000000b,
+       0x2474, 0xffffffff, 0x000a0009,
+       0x2475, 0xffffffff, 0x000d000c,
+       0x2454, 0xffffffff, 0x96940200,
+       0x21c2, 0xffffffff, 0x00900100,
+       0x311e, 0xffffffff, 0x00000080,
+       0x3101, 0xffffffff, 0x0020003f,
+       0xc, 0xffffffff, 0x0000001c,
+       0xd, 0x000f0000, 0x000f0000,
+       0x583, 0xffffffff, 0x00000100,
+       0x409, 0xffffffff, 0x00000100,
+       0x82a, 0xffffffff, 0x00000104,
+       0x993, 0x000c0000, 0x000c0000,
+       0x992, 0x000c0000, 0x000c0000,
+       0xbd4, 0x00000001, 0x00000001,
+       0xc33, 0xc0000fff, 0x00000104,
+       0x3079, 0x00000001, 0x00000001,
+       0x3430, 0xfffffff0, 0x00000100,
+       0x3630, 0xfffffff0, 0x00000100
+};
+
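+/* Indirect register access: take the lock, write the register offset to
+ * the INDEX register, then read/write the paired DATA register.  The
+ * dummy read after each write flushes it before the next access.
+ */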
+static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(AMDGPU_PCIE_INDEX, reg);
+       (void)RREG32(AMDGPU_PCIE_INDEX);
+       r = RREG32(AMDGPU_PCIE_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(AMDGPU_PCIE_INDEX, reg);
+       (void)RREG32(AMDGPU_PCIE_INDEX);
+       WREG32(AMDGPU_PCIE_DATA, v);
+       (void)RREG32(AMDGPU_PCIE_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+       (void)RREG32(PCIE_PORT_INDEX);
+       r = RREG32(PCIE_PORT_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+       (void)RREG32(PCIE_PORT_INDEX);
+       WREG32(PCIE_PORT_DATA, (v));
+       (void)RREG32(PCIE_PORT_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(SMC_IND_INDEX_0, (reg));
+       r = RREG32(SMC_IND_DATA_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return r;
+}
+
+static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(SMC_IND_INDEX_0, (reg));
+       WREG32(SMC_IND_DATA_0, (v));
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static u32 si_get_virtual_caps(struct amdgpu_device *adev)
+{
+       /* SI does not support SR-IOV */
+       return 0;
+}
+
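+/* Registers userspace is allowed to read back; .untouched entries are
+ * reported as 0, and .grbm_indexed entries are read through the
+ * per-SE/SH GRBM index (see si_read_indexed_register() below).
+ */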
+static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
+       {GRBM_STATUS, false},
+       {GB_ADDR_CONFIG, false},
+       {MC_ARB_RAMCFG, false},
+       {GB_TILE_MODE0, false},
+       {GB_TILE_MODE1, false},
+       {GB_TILE_MODE2, false},
+       {GB_TILE_MODE3, false},
+       {GB_TILE_MODE4, false},
+       {GB_TILE_MODE5, false},
+       {GB_TILE_MODE6, false},
+       {GB_TILE_MODE7, false},
+       {GB_TILE_MODE8, false},
+       {GB_TILE_MODE9, false},
+       {GB_TILE_MODE10, false},
+       {GB_TILE_MODE11, false},
+       {GB_TILE_MODE12, false},
+       {GB_TILE_MODE13, false},
+       {GB_TILE_MODE14, false},
+       {GB_TILE_MODE15, false},
+       {GB_TILE_MODE16, false},
+       {GB_TILE_MODE17, false},
+       {GB_TILE_MODE18, false},
+       {GB_TILE_MODE19, false},
+       {GB_TILE_MODE20, false},
+       {GB_TILE_MODE21, false},
+       {GB_TILE_MODE22, false},
+       {GB_TILE_MODE23, false},
+       {GB_TILE_MODE24, false},
+       {GB_TILE_MODE25, false},
+       {GB_TILE_MODE26, false},
+       {GB_TILE_MODE27, false},
+       {GB_TILE_MODE28, false},
+       {GB_TILE_MODE29, false},
+       {GB_TILE_MODE30, false},
+       {GB_TILE_MODE31, false},
+       {CC_RB_BACKEND_DISABLE, false, true},
+       {GC_USER_RB_BACKEND_DISABLE, false, true},
+       {PA_SC_RASTER_CONFIG, false, true},
+};
+
+static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
+                                         u32 se_num, u32 sh_num,
+                                         u32 reg_offset)
+{
+       uint32_t val;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       if (se_num != 0xffffffff || sh_num != 0xffffffff)
+               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+
+       val = RREG32(reg_offset);
+
+       if (se_num != 0xffffffff || sh_num != 0xffffffff)
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+       return val;
+}
+
+static int si_read_register(struct amdgpu_device *adev, u32 se_num,
+                            u32 sh_num, u32 reg_offset, u32 *value)
+{
+       uint32_t i;
+
+       *value = 0;
+       for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
+               if (reg_offset != si_allowed_read_registers[i].reg_offset)
+                       continue;
+
+               if (!si_allowed_read_registers[i].untouched)
+                       *value = si_allowed_read_registers[i].grbm_indexed ?
+                                si_read_indexed_register(adev, se_num,
+                                                          sh_num, reg_offset) :
+                                RREG32(reg_offset);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static bool si_read_disabled_bios(struct amdgpu_device *adev)
+{
+       u32 bus_cntl;
+       u32 d1vga_control = 0;
+       u32 d2vga_control = 0;
+       u32 vga_render_control = 0;
+       u32 rom_cntl;
+       bool r;
+
+       bus_cntl = RREG32(R600_BUS_CNTL);
+       if (adev->mode_info.num_crtc) {
+               d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+               d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+               vga_render_control = RREG32(VGA_RENDER_CONTROL);
+       }
+       rom_cntl = RREG32(R600_ROM_CNTL);
+
+       /* enable the rom */
+       WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+       if (adev->mode_info.num_crtc) {
+               /* Disable VGA mode */
+               WREG32(AVIVO_D1VGA_CONTROL,
+                      (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(AVIVO_D2VGA_CONTROL,
+                      (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+                                         AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+               WREG32(VGA_RENDER_CONTROL,
+                      (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+       }
+       WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+       r = amdgpu_read_bios(adev);
+
+       /* restore regs */
+       WREG32(R600_BUS_CNTL, bus_cntl);
+       if (adev->mode_info.num_crtc) {
+               WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+               WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+               WREG32(VGA_RENDER_CONTROL, vga_render_control);
+       }
+       WREG32(R600_ROM_CNTL, rom_cntl);
+       return r;
+}
+
+/* XXX: not implemented */
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+       return 0;
+}
+
+static void si_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+       uint32_t temp;
+
+       temp = RREG32(CONFIG_CNTL);
+       if (!state) {
+               temp &= ~(1<<0);
+               temp |= (1<<1);
+       } else {
+               temp &= ~(1<<1);
+       }
+       WREG32(CONFIG_CNTL, temp);
+}
+
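+/* Core clock reference: TCLK when the mux selects it, otherwise the
+ * SPLL reference frequency, divided by 4 if XTALIN_DIVIDE is set.
+ */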
+static u32 si_get_xclk(struct amdgpu_device *adev)
+{
+       u32 reference_clock = adev->clock.spll.reference_freq;
+       u32 tmp;
+
+       tmp = RREG32(CG_CLKPIN_CNTL_2);
+       if (tmp & MUX_TCLK_TO_XCLK)
+               return TCLK;
+
+       tmp = RREG32(CG_CLKPIN_CNTL);
+       if (tmp & XTALIN_DIVIDE)
+               return reference_clock / 4;
+
+       return reference_clock;
+}
+
+/* XXX: not implemented */
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+       return 0;
+}
+
+static const struct amdgpu_asic_funcs si_asic_funcs =
+{
+       .read_disabled_bios = &si_read_disabled_bios,
+       .read_register = &si_read_register,
+       .reset = &si_asic_reset,
+       .set_vga_state = &si_vga_set_state,
+       .get_xclk = &si_get_xclk,
+       .set_uvd_clocks = &si_set_uvd_clocks,
+       .set_vce_clocks = NULL,
+       .get_virtual_caps = &si_get_virtual_caps,
+};
+
+static uint32_t si_get_rev_id(struct amdgpu_device *adev)
+{
+       return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+               >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
+static int si_common_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->smc_rreg = &si_smc_rreg;
+       adev->smc_wreg = &si_smc_wreg;
+       adev->pcie_rreg = &si_pcie_rreg;
+       adev->pcie_wreg = &si_pcie_wreg;
+       adev->pciep_rreg = &si_pciep_rreg;
+       adev->pciep_wreg = &si_pciep_wreg;
+       adev->uvd_ctx_rreg = NULL;
+       adev->uvd_ctx_wreg = NULL;
+       adev->didt_rreg = NULL;
+       adev->didt_wreg = NULL;
+
+       adev->asic_funcs = &si_asic_funcs;
+
+       adev->rev_id = si_get_rev_id(adev);
+       adev->external_rev_id = 0xFF;
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       case CHIP_PITCAIRN:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       case CHIP_VERDE:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CGTS_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_VCE_MGCG |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               /* XXX: external_rev_id offset unverified */
+               adev->external_rev_id = adev->rev_id + 0x14;
+               break;
+       case CHIP_OLAND:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       case CHIP_HAINAN:
+               adev->cg_flags =
+                       AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_MGLS |
+                       /*AMD_CG_SUPPORT_GFX_CGCG |*/
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG;
+               adev->pg_flags = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int si_common_sw_init(void *handle)
+{
+       return 0;
+}
+
+static int si_common_sw_fini(void *handle)
+{
+       return 0;
+}
+
+static void si_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_golden_registers,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                tahiti_golden_registers2,
+                                                (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+               break;
+       case CHIP_PITCAIRN:
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_golden_registers,
+                                                (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+               break;
+       case CHIP_VERDE:
+               amdgpu_program_register_sequence(adev,
+                                                verde_golden_registers,
+                                                (const u32)ARRAY_SIZE(verde_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                verde_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                verde_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                verde_pg_init,
+                                                (const u32)ARRAY_SIZE(verde_pg_init));
+               break;
+       case CHIP_OLAND:
+               amdgpu_program_register_sequence(adev,
+                                                oland_golden_registers,
+                                                (const u32)ARRAY_SIZE(oland_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                oland_golden_rlc_registers,
+                                                (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+               amdgpu_program_register_sequence(adev,
+                                                oland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+               break;
+       case CHIP_HAINAN:
+               amdgpu_program_register_sequence(adev,
+                                                hainan_golden_registers,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers));
+               amdgpu_program_register_sequence(adev,
+                                                hainan_golden_registers2,
+                                                (const u32)ARRAY_SIZE(hainan_golden_registers2));
+               amdgpu_program_register_sequence(adev,
+                                                hainan_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+               break;
+       default:
+               BUG();
+       }
+}
+
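+/* Try to bring the PCIe link up to gen2/gen3 speed.  For gen3 the link
+ * is retrained with equalization redone on both the root port and the
+ * GPU before the speed change is initiated and polled for completion.
+ */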
+static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+       struct pci_dev *root = adev->pdev->bus->self;
+       int bridge_pos, gpu_pos;
+       u32 speed_cntl, mask, current_data_rate;
+       int ret, i;
+       u16 tmp16;
+
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
+       if (amdgpu_pcie_gen2 == 0)
+               return;
+
+       if (adev->flags & AMD_IS_APU)
+               return;
+
+       ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+       if (ret != 0)
+               return;
+
+       if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+               return;
+
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+       current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+               LC_CURRENT_DATA_RATE_SHIFT;
+       if (mask & DRM_PCIE_SPEED_80) {
+               if (current_data_rate == 2) {
+                       DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+                       return;
+               }
+               DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+       } else if (mask & DRM_PCIE_SPEED_50) {
+               if (current_data_rate == 1) {
+                       DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+                       return;
+               }
+               DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+       }
+
+       bridge_pos = pci_pcie_cap(root);
+       if (!bridge_pos)
+               return;
+
+       gpu_pos = pci_pcie_cap(adev->pdev);
+       if (!gpu_pos)
+               return;
+
+       if (mask & DRM_PCIE_SPEED_80) {
+               if (current_data_rate != 2) {
+                       u16 bridge_cfg, gpu_cfg;
+                       u16 bridge_cfg2, gpu_cfg2;
+                       u32 max_lw, current_lw, tmp;
+
+                       pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+                       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+                       tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+                       pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+                       tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+                       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+                       tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+                       max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+                       current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+                       if (current_lw < max_lw) {
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+                               if (tmp & LC_RENEGOTIATION_SUPPORT) {
+                                       tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+                                       tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+                                       tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+                                       WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+                               }
+                       }
+
+                       for (i = 0; i < 10; i++) {
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+                               if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+                                       break;
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+                               tmp |= LC_SET_QUIESCE;
+                               WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+                               tmp |= LC_REDO_EQ;
+                               WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+                               mdelay(100);
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+                               tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+                               tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+                               tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+                               tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+                               tmp16 &= ~((1 << 4) | (7 << 9));
+                               tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+                               tmp16 &= ~((1 << 4) | (7 << 9));
+                               tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+                               tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+                               tmp &= ~LC_SET_QUIESCE;
+                               WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+                       }
+               }
+       }
+
+       speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+       speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+       WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+       tmp16 &= ~0xf;
+       if (mask & DRM_PCIE_SPEED_80)
+               tmp16 |= 3;
+       else if (mask & DRM_PCIE_SPEED_50)
+               tmp16 |= 2;
+       else
+               tmp16 |= 1;
+       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+       speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+       WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+               if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+                       break;
+               udelay(1);
+       }
+}
+
+static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+       r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
+       WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+       r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
+       WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
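+
+/*
+ * si_program_aspm - configure PCIe ASPM
+ *
+ * Programs the L0s/L1 inactivity timers, PLL power-down behaviour in L1
+ * and, when the root port advertises it, clock-request based savings.
+ * Skipped entirely when ASPM is disabled via the module parameter or on APUs.
+ */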
+static void si_program_aspm(struct amdgpu_device *adev)
+{
+       u32 data, orig;
+       bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+       bool disable_clkreq = false;
+
+       if (amdgpu_aspm == 0)
+               return;
+
+       if (adev->flags & AMD_IS_APU)
+               return;
+
+       orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+       data &= ~LC_XMIT_N_FTS_MASK;
+       data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+       if (orig != data)
+               WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+       orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+       data |= LC_GO_TO_RECOVERY;
+       if (orig != data)
+               WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+       orig = data = RREG32_PCIE(PCIE_P_CNTL);
+       data |= P_IGNORE_EDB_ERR;
+       if (orig != data)
+               WREG32_PCIE(PCIE_P_CNTL, data);
+
+       orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+       data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+       data |= LC_PMI_TO_L1_DIS;
+       if (!disable_l0s)
+               data |= LC_L0S_INACTIVITY(7);
+
+       if (!disable_l1) {
+               data |= LC_L1_INACTIVITY(7);
+               data &= ~LC_PMI_TO_L1_DIS;
+               if (orig != data)
+                       WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+               if (!disable_plloff_in_l1) {
+                       bool clk_req_support;
+
+                       orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+                       if (orig != data)
+                               si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+                       orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+                       if (orig != data)
+                               si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+                       orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+                       orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+                       data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+                       data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+                       if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+                               data &= ~PLL_RAMP_UP_TIME_0_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+                               data &= ~PLL_RAMP_UP_TIME_1_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_2);
+                               data &= ~PLL_RAMP_UP_TIME_2_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_2, data);
+
+                               orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_3);
+                               data &= ~PLL_RAMP_UP_TIME_3_MASK;
+                               if (orig != data)
+                                       si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_3, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+                               data &= ~PLL_RAMP_UP_TIME_0_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+                               data &= ~PLL_RAMP_UP_TIME_1_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_2);
+                               data &= ~PLL_RAMP_UP_TIME_2_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_2, data);
+
+                               orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_3);
+                               data &= ~PLL_RAMP_UP_TIME_3_MASK;
+                               if (orig != data)
+                                       si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_3, data);
+                       }
+                       orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+                       data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+                       data |= LC_DYN_LANES_PWR_STATE(3);
+                       if (orig != data)
+                               WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+                       orig = data = si_pif_phy0_rreg(adev, PB0_PIF_CNTL);
+                       data &= ~LS2_EXIT_TIME_MASK;
+                       if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+                               data |= LS2_EXIT_TIME(5);
+                       if (orig != data)
+                               si_pif_phy0_wreg(adev, PB0_PIF_CNTL, data);
+
+                       orig = data = si_pif_phy1_rreg(adev, PB1_PIF_CNTL);
+                       data &= ~LS2_EXIT_TIME_MASK;
+                       if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+                               data |= LS2_EXIT_TIME(5);
+                       if (orig != data)
+                               si_pif_phy1_wreg(adev, PB1_PIF_CNTL, data);
+
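+                       /* clock-request support requires the root port to advertise clock power management */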
+                       if (!disable_clkreq &&
+                           !pci_is_root_bus(adev->pdev->bus)) {
+                               struct pci_dev *root = adev->pdev->bus->self;
+                               u32 lnkcap;
+
+                               clk_req_support = false;
+                               pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+                               if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+                                       clk_req_support = true;
+                       } else {
+                               clk_req_support = false;
+                       }
+
+                       if (clk_req_support) {
+                               orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+                               data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+                               if (orig != data)
+                                       WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+                               orig = data = RREG32(THM_CLK_CNTL);
+                               data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+                               data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+                               if (orig != data)
+                                       WREG32(THM_CLK_CNTL, data);
+
+                               orig = data = RREG32(MISC_CLK_CNTL);
+                               data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+                               data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+                               if (orig != data)
+                                       WREG32(MISC_CLK_CNTL, data);
+
+                               orig = data = RREG32(CG_CLKPIN_CNTL);
+                               data &= ~BCLK_AS_XCLK;
+                               if (orig != data)
+                                       WREG32(CG_CLKPIN_CNTL, data);
+
+                               orig = data = RREG32(CG_CLKPIN_CNTL_2);
+                               data &= ~FORCE_BIF_REFCLK_EN;
+                               if (orig != data)
+                                       WREG32(CG_CLKPIN_CNTL_2, data);
+
+                               orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+                               data &= ~MPLL_CLKOUT_SEL_MASK;
+                               data |= MPLL_CLKOUT_SEL(4);
+                               if (orig != data)
+                                       WREG32(MPLL_BYPASSCLK_SEL, data);
+
+                               orig = data = RREG32(SPLL_CNTL_MODE);
+                               data &= ~SPLL_REFCLK_SEL_MASK;
+                               if (orig != data)
+                                       WREG32(SPLL_CNTL_MODE, data);
+                       }
+               }
+       } else {
+               if (orig != data)
+                       WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+       }
+
+       orig = data = RREG32_PCIE(PCIE_CNTL2);
+       data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+       if (orig != data)
+               WREG32_PCIE(PCIE_CNTL2, data);
+
+       if (!disable_l0s) {
+               data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+               if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+                       data = RREG32_PCIE(PCIE_LC_STATUS1);
+                       if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+                               orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+                               data &= ~LC_L0S_INACTIVITY_MASK;
+                               if (orig != data)
+                                       WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+                       }
+               }
+       }
+}
+
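+/*
+ * si_fix_pci_max_read_req_size - sanitize the PCIe max read request size
+ *
+ * A maximum read request size of 128 bytes, or an invalid encoding,
+ * is replaced with a safe value of 512 bytes.
+ */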
+static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
+{
+       int readrq;
+       u16 v;
+
+       readrq = pcie_get_readrq(adev->pdev);
+       v = ffs(readrq) - 8;
+       if ((v == 0) || (v == 6) || (v == 7))
+               pcie_set_readrq(adev->pdev, 512);
+}
+
+static int si_common_hw_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_fix_pci_max_read_req_size(adev);
+       si_init_golden_registers(adev);
+       si_pcie_gen3_enable(adev);
+       si_program_aspm(adev);
+
+       return 0;
+}
+
+static int si_common_hw_fini(void *handle)
+{
+       return 0;
+}
+
+static int si_common_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_common_hw_fini(adev);
+}
+
+static int si_common_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_common_hw_init(adev);
+}
+
+static bool si_common_is_idle(void *handle)
+{
+       return true;
+}
+
+static int si_common_wait_for_idle(void *handle)
+{
+       return 0;
+}
+
+static int si_common_soft_reset(void *handle)
+{
+       return 0;
+}
+
+static int si_common_set_clockgating_state(void *handle,
+                                           enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int si_common_set_powergating_state(void *handle,
+                                           enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs si_common_ip_funcs = {
+       .name = "si_common",
+       .early_init = si_common_early_init,
+       .late_init = NULL,
+       .sw_init = si_common_sw_init,
+       .sw_fini = si_common_sw_fini,
+       .hw_init = si_common_hw_init,
+       .hw_fini = si_common_hw_fini,
+       .suspend = si_common_suspend,
+       .resume = si_common_resume,
+       .is_idle = si_common_is_idle,
+       .wait_for_idle = si_common_wait_for_idle,
+       .soft_reset = si_common_soft_reset,
+       .set_clockgating_state = si_common_set_clockgating_state,
+       .set_powergating_state = si_common_set_powergating_state,
+};
+
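+/* IP block list shared by Tahiti, Pitcairn, Verde and Oland */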
+static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+{
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_DCE,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &dce_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_dma_ip_funcs,
+       },
+/*     {
+               .type = AMD_IP_BLOCK_TYPE_UVD,
+               .major = 3,
+               .minor = 1,
+               .rev = 0,
+               .funcs = &si_null_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_VCE,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_null_ip_funcs,
+       },
+       */
+};
+
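+/* Hainan has no display engine, so the DCE IP block is omitted */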
+static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
+{
+       {
+               .type = AMD_IP_BLOCK_TYPE_COMMON,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_common_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gmc_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_IH,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_ih_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SMC,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &amdgpu_pp_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_GFX,
+               .major = 6,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &gfx_v6_0_ip_funcs,
+       },
+       {
+               .type = AMD_IP_BLOCK_TYPE_SDMA,
+               .major = 1,
+               .minor = 0,
+               .rev = 0,
+               .funcs = &si_dma_ip_funcs,
+       },
+};
+
+int si_set_ip_blocks(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_VERDE:
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_OLAND:
+               adev->ip_blocks = verde_ip_blocks;
+               adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+               break;
+       case CHIP_HAINAN:
+               adev->ip_blocks = hainan_ip_blocks;
+               adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+               break;
+       default:
+               BUG();
+       }
+       return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h
new file mode 100644 (file)
index 0000000..959d7b6
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_H__
+#define __SI_H__
+
+extern const struct amd_ip_funcs si_common_ip_funcs;
+
+void si_srbm_select(struct amdgpu_device *adev,
+                    u32 me, u32 pipe, u32 queue, u32 vmid);
+int si_set_ip_blocks(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
new file mode 100644 (file)
index 0000000..de35819
--- /dev/null
@@ -0,0 +1,915 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "si/sid.h"
+
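+/* register offsets for the two SI DMA engine instances */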
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+       DMA0_REGISTER_OFFSET,
+       DMA1_REGISTER_OFFSET
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+
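+/**
+ * si_dma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware writeback slot (SI).
+ */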
+static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       return ring->adev->wb.wb[ring->rptr_offs>>2];
+}
+
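+/**
+ * si_dma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the DMA_RB_WPTR register (SI).
+ */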
+static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+       return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
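+/**
+ * si_dma_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the DMA_RB_WPTR register (SI).
+ */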
+static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+       WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
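+/**
+ * si_dma_ring_emit_ib - schedule an IB on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @ib: IB object to schedule
+ * @vm_id: VM context id the IB runs under
+ * @ctx_switch: unused here
+ *
+ * Schedule an IB in the DMA ring (SI).
+ */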
+static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+                               struct amdgpu_ib *ib,
+                               unsigned vm_id, bool ctx_switch)
+{
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+       amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
+       amdgpu_ring_write(ring, 1);
+}
+
+static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
+       amdgpu_ring_write(ring, 1);
+}
+
+/**
+ * si_dma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address to write the fence sequence number to
+ * @seq: fence sequence number to write
+ * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (SI).
+ */
+static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                     unsigned flags)
+{
+       bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+
+       /* write the fence */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, addr & 0xfffffffc);
+       amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+       amdgpu_ring_write(ring, seq);
+       /* optionally write high bits as well */
+       if (write64bit) {
+               addr += 4;
+               amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+               amdgpu_ring_write(ring, addr & 0xfffffffc);
+               amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+               amdgpu_ring_write(ring, upper_32_bits(seq));
+       }
+       /* generate an interrupt */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
+}
+
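+/**
+ * si_dma_stop - stop the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the ring buffers and mark the rings as not ready (SI).
+ */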
+static void si_dma_stop(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 rb_cntl;
+       unsigned i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               /* dma0 */
+               rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
+               rb_cntl &= ~DMA_RB_ENABLE;
+               WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               if (adev->mman.buffer_funcs_ring == ring)
+                       amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+               ring->ready = false;
+       }
+}
+
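+/**
+ * si_dma_start - set up the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Program the ring buffers, enable them and run a ring test (SI).
+ * Returns 0 for success, error for failure.
+ */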
+static int si_dma_start(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *ring;
+       u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
+       int i, r;
+       uint64_t rptr_addr;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+
+               WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+               WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+               /* Set ring buffer size in dwords */
+               rb_bufsz = order_base_2(ring->ring_size / 4);
+               rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+               rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+               WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+               /* Initialize the ring buffer's read and write pointers */
+               WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
+               WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+
+               rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+
+               WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+               WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+
+               rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+               WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+
+               /* enable DMA IBs */
+               ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+               ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+               WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+               dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
+               dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+               WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+
+               ring->wptr = 0;
+               WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+               WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+
+               ring->ready = true;
+
+               r = amdgpu_ring_test_ring(ring);
+               if (r) {
+                       ring->ready = false;
+                       return r;
+               }
+
+               if (adev->mman.buffer_funcs_ring == ring)
+                       amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+       }
+
+       return 0;
+}
+
+/**
+ * si_dma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (SI).
+ * Returns 0 for success, error for failure.
+ */
+static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       unsigned i;
+       unsigned index;
+       int r;
+       u32 tmp;
+       u64 gpu_addr;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+               return r;
+       }
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+
+       r = amdgpu_ring_alloc(ring, 4);
+       if (r) {
+               DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+               amdgpu_wb_free(adev, index);
+               return r;
+       }
+
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
+       amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = le32_to_cpu(adev->wb.wb[index]);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       amdgpu_wb_free(adev, index);
+
+       return r;
+}
+
+/**
+ * si_dma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: how long to wait for the IB to complete, in jiffies
+ *
+ * Test a simple IB in the DMA ring (SI).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ib ib;
+       struct fence *f = NULL;
+       unsigned index;
+       u32 tmp = 0;
+       u64 gpu_addr;
+       long r;
+
+       r = amdgpu_wb_get(adev, &index);
+       if (r) {
+               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+               return r;
+       }
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       tmp = 0xCAFEDEAD;
+       adev->wb.wb[index] = cpu_to_le32(tmp);
+       memset(&ib, 0, sizeof(ib));
+       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+               goto err0;
+       }
+
+       ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
+       ib.ptr[1] = lower_32_bits(gpu_addr);
+       ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
+       ib.ptr[3] = 0xDEADBEEF;
+       ib.length_dw = 4;
+       r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+       if (r)
+               goto err1;
+
+       r = fence_wait_timeout(f, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out\n");
+               r = -ETIMEDOUT;
+               goto err1;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+               goto err1;
+       }
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+               r = 0;
+       } else {
+               DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+
+err1:
+       amdgpu_ib_free(adev, &ib, NULL);
+       fence_put(f);
+err0:
+       amdgpu_wb_free(adev, index);
+       return r;
+}
+
+/**
+ * si_dma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using DMA (SI).
+ */
+static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
+                              uint64_t pe, uint64_t src,
+                              unsigned count)
+{
+       unsigned bytes = count * 8;
+
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                             1, 0, 0, bytes);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = lower_32_bits(src);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+       ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+}
+
+/**
+ * si_dma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: value to write into the page table entries
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using DMA (SI).
+ */
+static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+                               uint64_t value, unsigned count,
+                               uint32_t incr)
+{
+       unsigned ndw = count * 2;
+
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+       ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+       ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+       for (; ndw > 0; ndw -= 2) {
+               ib->ptr[ib->length_dw++] = lower_32_bits(value);
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               value += incr;
+       }
+}
+
+/**
+ * si_dma_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: physical start address to map
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (SI).
+ */
+static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
+                                    uint64_t pe,
+                                    uint64_t addr, unsigned count,
+                                    uint32_t incr, uint32_t flags)
+{
+       uint64_t value;
+       unsigned ndw;
+
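+       /* one packet can program at most 0xFFFFE dwords, so split the update into chunks */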
+       while (count) {
+               ndw = count * 2;
+               if (ndw > 0xFFFFE)
+                       ndw = 0xFFFFE;
+
+               if (flags & AMDGPU_PTE_VALID)
+                       value = addr;
+               else
+                       value = 0;
+
+               /* for physically contiguous pages (vram) */
+               ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+               ib->ptr[ib->length_dw++] = pe; /* dst addr */
+               ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+               ib->ptr[ib->length_dw++] = flags; /* mask */
+               ib->ptr[ib->length_dw++] = 0;
+               ib->ptr[ib->length_dw++] = value; /* value */
+               ib->ptr[ib->length_dw++] = upper_32_bits(value);
+               ib->ptr[ib->length_dw++] = incr; /* increment size */
+               ib->ptr[ib->length_dw++] = 0;
+               pe += ndw * 4;
+               addr += (ndw / 2) * incr;
+               count -= ndw / 2;
+       }
+}
+
+/**
+ * si_dma_ring_pad_ib - pad the IB to a multiple of 8 dwords
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOP packets until its length is a multiple of 8 (SI).
+ */
+static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+       while (ib->length_dw & 0x7)
+               ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (SI).
+ */
+static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       /* wait for idle */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
+                         (1 << 27)); /* Poll memory */
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
+       amdgpu_ring_write(ring, 0xffffffff); /* mask */
+       amdgpu_ring_write(ring, seq); /* value */
+       amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
+}
+
+/**
+ * si_dma_ring_emit_vm_flush - vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VM context id to flush
+ * @pd_addr: physical address of the page directory
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (SI).
+ */
+static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                     unsigned vm_id, uint64_t pd_addr)
+{
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       if (vm_id < 8)
+               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+       else
+               amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       /* bits 0-7 are the VM contexts0-7 */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
+       amdgpu_ring_write(ring, 1 << vm_id);
+
+       /* wait for invalidate to complete */
+       amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+       amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+       amdgpu_ring_write(ring, 0xff << 16); /* retry */
+       amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+       amdgpu_ring_write(ring, 0); /* value */
+       amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+}
+
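+/* worst-case dword counts used to reserve ring space for an IB and per-frame overhead */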
+static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               7 + 3; /* si_dma_ring_emit_ib */
+}
+
+static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               3 + /* si_dma_ring_emit_hdp_flush */
+               3 + /* si_dma_ring_emit_hdp_invalidate */
+               6 + /* si_dma_ring_emit_pipeline_sync */
+               12 + /* si_dma_ring_emit_vm_flush */
+               9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static int si_dma_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev->sdma.num_instances = 2;
+
+       si_dma_set_ring_funcs(adev);
+       si_dma_set_buffer_funcs(adev);
+       si_dma_set_vm_pte_funcs(adev);
+       si_dma_set_irq_funcs(adev);
+
+       return 0;
+}
+
+static int si_dma_sw_init(void *handle)
+{
+       struct amdgpu_ring *ring;
+       int r, i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       /* DMA0 trap event */
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+       if (r)
+               return r;
+
+       /* DMA1 trap event */
+       r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+       if (r)
+               return r;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               ring->use_doorbell = false;
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 1024,
+                                    DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
+
+       return r;
+}
+
+static int si_dma_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+
+       return 0;
+}
+
+static int si_dma_hw_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_dma_start(adev);
+}
+
+static int si_dma_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_dma_stop(adev);
+
+       return 0;
+}
+
+static int si_dma_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_dma_hw_fini(adev);
+}
+
+static int si_dma_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_dma_hw_init(adev);
+}
+
+static bool si_dma_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 tmp = RREG32(SRBM_STATUS2);
+
+       if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+               return false;
+
+       return true;
+}
+
+static int si_dma_wait_for_idle(void *handle)
+{
+       unsigned i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (si_dma_is_idle(handle))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int si_dma_soft_reset(void *handle)
+{
+       DRM_INFO("si_dma_soft_reset: not implemented\n");
+       return 0;
+}
+
+static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *src,
+                                       unsigned type,
+                                       enum amdgpu_interrupt_state state)
+{
+       u32 sdma_cntl;
+
+       switch (type) {
+       case AMDGPU_SDMA_IRQ_TRAP0:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+                       sdma_cntl &= ~TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+                       sdma_cntl |= TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case AMDGPU_SDMA_IRQ_TRAP1:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+                       sdma_cntl &= ~TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+                       sdma_cntl |= TRAP_ENABLE;
+                       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int si_dma_process_trap_irq(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       amdgpu_fence_process(&adev->sdma.instance[0].ring);
+
+       return 0;
+}
+
+static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     struct amdgpu_iv_entry *entry)
+{
+       amdgpu_fence_process(&adev->sdma.instance[1].ring);
+
+       return 0;
+}
+
+static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
+                                             struct amdgpu_irq_src *source,
+                                             struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("Illegal instruction in SDMA command stream\n");
+       schedule_work(&adev->reset_work);
+       return 0;
+}
+
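+/*
+ * Enable or disable medium grain clock gating for the DMA engines by
+ * toggling the memory power override and reprogramming DMA_CLK_CTRL.
+ */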
+static int si_dma_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       u32 orig, data, offset;
+       int i;
+       bool enable;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       enable = (state == AMD_CG_STATE_GATE);
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       if (i == 0)
+                               offset = DMA0_REGISTER_OFFSET;
+                       else
+                               offset = DMA1_REGISTER_OFFSET;
+                       orig = data = RREG32(DMA_POWER_CNTL + offset);
+                       data &= ~MEM_POWER_OVERRIDE;
+                       if (data != orig)
+                               WREG32(DMA_POWER_CNTL + offset, data);
+                       WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+               }
+       } else {
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       if (i == 0)
+                               offset = DMA0_REGISTER_OFFSET;
+                       else
+                               offset = DMA1_REGISTER_OFFSET;
+                       orig = data = RREG32(DMA_POWER_CNTL + offset);
+                       data |= MEM_POWER_OVERRIDE;
+                       if (data != orig)
+                               WREG32(DMA_POWER_CNTL + offset, data);
+
+                       orig = data = RREG32(DMA_CLK_CTRL + offset);
+                       data = 0xff000000;
+                       if (data != orig)
+                               WREG32(DMA_CLK_CTRL + offset, data);
+               }
+       }
+
+       return 0;
+}
+
+static int si_dma_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       u32 tmp;
+
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       WREG32(DMA_PGFSM_WRITE,  0x00002000);
+       WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+
+       for (tmp = 0; tmp < 5; tmp++)
+               WREG32(DMA_PGFSM_WRITE, 0);
+
+       return 0;
+}
+
+const struct amd_ip_funcs si_dma_ip_funcs = {
+       .name = "si_dma",
+       .early_init = si_dma_early_init,
+       .late_init = NULL,
+       .sw_init = si_dma_sw_init,
+       .sw_fini = si_dma_sw_fini,
+       .hw_init = si_dma_hw_init,
+       .hw_fini = si_dma_hw_fini,
+       .suspend = si_dma_suspend,
+       .resume = si_dma_resume,
+       .is_idle = si_dma_is_idle,
+       .wait_for_idle = si_dma_wait_for_idle,
+       .soft_reset = si_dma_soft_reset,
+       .set_clockgating_state = si_dma_set_clockgating_state,
+       .set_powergating_state = si_dma_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+       .get_rptr = si_dma_ring_get_rptr,
+       .get_wptr = si_dma_ring_get_wptr,
+       .set_wptr = si_dma_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = si_dma_ring_emit_ib,
+       .emit_fence = si_dma_ring_emit_fence,
+       .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
+       .emit_vm_flush = si_dma_ring_emit_vm_flush,
+       .emit_hdp_flush = si_dma_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
+       .test_ring = si_dma_ring_test_ring,
+       .test_ib = si_dma_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .pad_ib = si_dma_ring_pad_ib,
+       .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
+       .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
+       .set = si_dma_set_trap_irq_state,
+       .process = si_dma_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
+       .set = si_dma_set_trap_irq_state,
+       .process = si_dma_process_trap_irq_1,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
+       .process = si_dma_process_illegal_inst_irq,
+};
+
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
+       adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
+       adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
+}
+
+/**
+ * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (SI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
+                                      uint64_t src_offset,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+                                             1, 0, 0, byte_count);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+       ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
+       ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
+}
+
+/**
+ * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (SI).
+ */
+static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
+                                      uint32_t src_data,
+                                      uint64_t dst_offset,
+                                      uint32_t byte_count)
+{
+       ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
+                                             0, 0, 0, byte_count / 4);
+       ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+       ib->ptr[ib->length_dw++] = src_data;
+       ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
+}
+
+
+static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
+       .copy_max_bytes = 0xffff8,
+       .copy_num_dw = 5,
+       .emit_copy_buffer = si_dma_emit_copy_buffer,
+
+       .fill_max_bytes = 0xffff8,
+       .fill_num_dw = 4,
+       .emit_fill_buffer = si_dma_emit_fill_buffer,
+};
+
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
+{
+       if (adev->mman.buffer_funcs == NULL) {
+               adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+       }
+}
+
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+       .copy_pte = si_dma_vm_copy_pte,
+       .write_pte = si_dma_vm_write_pte,
+       .set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+       unsigned i;
+
+       if (adev->vm_manager.vm_pte_funcs == NULL) {
+               adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+               for (i = 0; i < adev->sdma.num_instances; i++)
+                       adev->vm_manager.vm_pte_rings[i] =
+                               &adev->sdma.instance[i].ring;
+
+               adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+       }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
new file mode 100644 (file)
index 0000000..3a3e0c7
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_DMA_H__
+#define __SI_DMA_H__
+
+extern const struct amd_ip_funcs si_dma_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
new file mode 100644 (file)
index 0000000..e2db4a7
--- /dev/null
@@ -0,0 +1,7993 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_atombios.h"
+#include "si/sid.h"
+#include "r600_dpm.h"
+#include "si_dpm.h"
+#include "atom.h"
+#include "../include/pptable.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END                 0x20000
+
+#define SCLK_MIN_DEEPSLEEP_FREQ     1350
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+
+#define BIOS_SCRATCH_4                                    0x5cd
+
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+
+union power_info {
+       struct _ATOM_POWERPLAY_INFO info;
+       struct _ATOM_POWERPLAY_INFO_V2 info_2;
+       struct _ATOM_POWERPLAY_INFO_V3 info_3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+       struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+       struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+       struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+       struct _ATOM_PPLIB_FANTABLE fan;
+       struct _ATOM_PPLIB_FANTABLE2 fan2;
+       struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+union pplib_clock_info {
+       struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+       struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+       struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+       struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+       struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
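+/* default UTC/DTC table values inherited from the r600 dpm code */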
+static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
+{
+       R600_UTC_DFLT_00,
+       R600_UTC_DFLT_01,
+       R600_UTC_DFLT_02,
+       R600_UTC_DFLT_03,
+       R600_UTC_DFLT_04,
+       R600_UTC_DFLT_05,
+       R600_UTC_DFLT_06,
+       R600_UTC_DFLT_07,
+       R600_UTC_DFLT_08,
+       R600_UTC_DFLT_09,
+       R600_UTC_DFLT_10,
+       R600_UTC_DFLT_11,
+       R600_UTC_DFLT_12,
+       R600_UTC_DFLT_13,
+       R600_UTC_DFLT_14,
+};
+
+static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
+{
+       R600_DTC_DFLT_00,
+       R600_DTC_DFLT_01,
+       R600_DTC_DFLT_02,
+       R600_DTC_DFLT_03,
+       R600_DTC_DFLT_04,
+       R600_DTC_DFLT_05,
+       R600_DTC_DFLT_06,
+       R600_DTC_DFLT_07,
+       R600_DTC_DFLT_08,
+       R600_DTC_DFLT_09,
+       R600_DTC_DFLT_10,
+       R600_DTC_DFLT_11,
+       R600_DTC_DFLT_12,
+       R600_DTC_DFLT_13,
+       R600_DTC_DFLT_14,
+};
+
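+/* CAC weight register values for Tahiti */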
+static const struct si_cac_config_reg cac_weights_tahiti[] =
+{
+       { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
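+/* Local (per-block) CAC weights: entries come in pairs that program a
+ * weight field and the matching enable bit of the same block register.
+ */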
+static const struct si_cac_config_reg lcac_tahiti[] =
+{
+       { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_tahiti[] =
+{
+       { 0xFFFFFFFF }
+};
+
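+/* Per-ASIC PowerTune coefficients (positional si_powertune_data
+ * initializer); the nested block appears to hold the leakage coefficients,
+ * and the trailing bool appears to select whether PowerTune is enabled by
+ * default.
+ */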
+static const struct si_powertune_data powertune_data_tahiti =
+{
+       ((1 << 16) | 27027),
+       6,
+       0,
+       4,
+       95,
+       {
+               0UL,
+               0UL,
+               4521550UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               40
+       },
+       595000000UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
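+/* Per-ASIC DTE (thermal estimation) tuning data; the trailing bool
+ * appears to select whether DTE is enabled by default.
+ */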
+static const struct si_dte_data dte_data_tahiti =
+{
+       { 1159409, 0, 0, 0, 0 },
+       { 777, 0, 0, 0, 0 },
+       2,
+       54000,
+       127000,
+       25,
+       2,
+       10,
+       13,
+       { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
+       { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
+       { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
+       85,
+       false
+};
+
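+/* Tahiti LE DTE parameters retained for reference but compiled out. */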
+#if 0
+static const struct si_dte_data dte_data_tahiti_le =
+{
+       { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
+       { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
+       0x5,
+       0xAFC8,
+       0x64,
+       0x32,
+       1,
+       0,
+       0x10,
+       { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
+       { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
+       { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
+       85,
+       true
+};
+#endif
+
+static const struct si_dte_data dte_data_tahiti_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_new_zealand =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
+       { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
+       0x5,
+       0xAFC8,
+       0x69,
+       0x32,
+       1,
+       0,
+       0x10,
+       { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
+       85,
+       true
+};
+
+static const struct si_dte_data dte_data_aruba_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_malta =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_cac_config_reg cac_weights_pitcairn[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_pitcairn[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_pitcairn[] =
+{
+       { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_pitcairn =
+{
+       ((1 << 16) | 27027),
+       5,
+       0,
+       6,
+       100,
+       {
+               51600000UL,
+               1800000UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
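+/* All-zero DTE data with the enable flag false, seemingly leaving DTE
+ * off on Pitcairn.
+ */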
+static const struct si_dte_data dte_data_pitcairn =
+{
+       { 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0 },
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       0,
+       false
+};
+
+static const struct si_dte_data dte_data_curacao_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_curacao_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_neptune_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       45000,
+       100,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
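+/* The Chelsea/Heathrow/Cape Verde weight tables below are near-identical,
+ * differing mainly in the register 0x14 entry.
+ */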
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_heathrow[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_cape_verde[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_cape_verde[] =
+{
+       { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_cape_verde =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       7,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static const struct si_dte_data dte_data_cape_verde =
+{
+       { 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0 },
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       0,
+       false
+};
+
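+/* The Venus DTE variants share the same shape, apparently differing only
+ * in the filter coefficients and the final limit table.
+ */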
+static const struct si_dte_data dte_data_venus_xtx =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
+       5,
+       55000,
+       0x69,
+       0xA,
+       1,
+       0,
+       0x3,
+       { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_venus_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
+       5,
+       55000,
+       0x69,
+       0xA,
+       1,
+       0,
+       0x3,
+       { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_venus_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
+       5,
+       55000,
+       0x69,
+       0xA,
+       1,
+       0,
+       0x3,
+       { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_cac_config_reg cac_weights_oland[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_xt[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_pro[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_xt[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_oland[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_mars_pro[] =
+{
+       { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+       { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+       { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_oland[] =
+{
+       { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_oland =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       7,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static const struct si_powertune_data powertune_data_mars_pro =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       7,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static const struct si_dte_data dte_data_oland =
+{
+       { 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0 },
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+       0,
+       false
+};
+
+static const struct si_dte_data dte_data_mars_pro =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       55000,
+       105,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_dte_data dte_data_sun_xt =
+{
+       { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+       { 0x0, 0x0, 0x0, 0x0, 0x0 },
+       5,
+       55000,
+       105,
+       0xA,
+       1,
+       0,
+       0x10,
+       { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+       { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+       { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+       90,
+       true
+};
+
+static const struct si_cac_config_reg cac_weights_hainan[] =
+{
+       { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
+       { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
+       { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
+       { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
+       { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
+       { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
+       { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
+       { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
+       { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
+       { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
+       { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
+       { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
+       { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
+       { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
+       { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
+       { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
+       { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+       { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
+       { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+       { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
+       { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_hainan =
+{
+       ((1 << 16) | 0x6993),
+       5,
+       0,
+       9,
+       105,
+       {
+               0UL,
+               0UL,
+               7194395UL,
+               309631529UL,
+               -1270850L,
+               4513710L,
+               100
+       },
+       117830498UL,
+       12,
+       {
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0,
+               0
+       },
+       true
+};
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
+static struct si_ps *si_get_ps(struct amdgpu_ps *aps);
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+                                    const struct atom_voltage_table *table,
+                                    u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+                                   SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+                                   u16 *std_voltage);
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+                                     u16 reg_offset, u32 value);
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+                                        struct rv7xx_pl *pl,
+                                        SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+                                   u32 engine_clock,
+                                   SISLANDS_SMC_SCLK_VALUE *sclk);
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
+{
+       struct si_power_info *pi = adev->pm.dpm.priv;
+       return pi;
+}
+
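+/* Evaluate the leakage model with drm fixed-point math:
+ *
+ *   tmp = t_slope * V + t_intercept
+ *   kt  = exp(tmp * T) / exp(tmp * t_ref)
+ *   kv  = av * exp(bv * V)
+ *   leakage = I_leakage * kt * kv * V, scaled by 1000 on output
+ *
+ * The t_slope/t_intercept/av/bv coefficients are supplied scaled by 10^8.
+ */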
+static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+                                                    u16 v, s32 t, u32 ileakage, u32 *leakage)
+{
+       s64 kt, kv, leakage_w, i_leakage, vddc;
+       s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+       s64 tmp;
+
+       i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+       vddc = div64_s64(drm_int2fixp(v), 1000);
+       temperature = div64_s64(drm_int2fixp(t), 1000);
+
+       t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
+       t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
+       av = div64_s64(drm_int2fixp(coeff->av), 100000000);
+       bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
+       t_ref = drm_int2fixp(coeff->t_ref);
+
+       tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+       kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+       kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
+       kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
+
+       leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+       *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
+                                            const struct ni_leakage_coeffients *coeff,
+                                            u16 v,
+                                            s32 t,
+                                            u32 i_leakage,
+                                            u32 *leakage)
+{
+       si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
+static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
+                                              const u32 fixed_kt, u16 v,
+                                              u32 ileakage, u32 *leakage)
+{
+       s64 kt, kv, leakage_w, i_leakage, vddc;
+
+       i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+       vddc = div64_s64(drm_int2fixp(v), 1000);
+
+       kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
+       kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
+                         drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
+
+       leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+       *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
+                                      const struct ni_leakage_coeffients *coeff,
+                                      const u32 fixed_kt,
+                                      u16 v,
+                                      u32 i_leakage,
+                                      u32 *leakage)
+{
+       si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
+}
+
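+/* Re-derive the DTE r[] and tdep_r[] coefficients from the current
+ * PL1/PL2 power limits; logs an error and leaves the table untouched
+ * when PL2 is zero or exceeds PL1.
+ */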
+static void si_update_dte_from_pl2(struct amdgpu_device *adev,
+                                  struct si_dte_data *dte_data)
+{
+       u32 p_limit1 = adev->pm.dpm.tdp_limit;
+       u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
+       u32 k = dte_data->k;
+       u32 t_max = dte_data->max_t;
+       u32 t_split[5] = { 10, 15, 20, 25, 30 };
+       u32 t_0 = dte_data->t0;
+       u32 i;
+
+       if (p_limit2 != 0 && p_limit2 <= p_limit1) {
+               dte_data->tdep_count = 3;
+
+               for (i = 0; i < k; i++) {
+                       dte_data->r[i] =
+                               (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
+                               (p_limit2  * (u32)100);
+               }
+
+               dte_data->tdep_r[1] = dte_data->r[4] * 2;
+
+               for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
+                       dte_data->tdep_r[i] = dte_data->r[4];
+               }
+       } else {
+               DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
+       }
+}
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
+{
+       struct ni_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
+static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
+{
+       struct si_ps *ps = aps->ps_priv;
+
+       return ps;
+}
+
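+/* Select the CAC weight, local CAC, CAC override, powertune and DTE
+ * tables for the detected ASIC, refined by PCI device ID where
+ * individual SKUs were characterized separately.
+ */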
+static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       bool update_dte_from_pl2 = false;
+
+       if (adev->asic_type == CHIP_TAHITI) {
+               si_pi->cac_weights = cac_weights_tahiti;
+               si_pi->lcac_config = lcac_tahiti;
+               si_pi->cac_override = cac_override_tahiti;
+               si_pi->powertune_data = &powertune_data_tahiti;
+               si_pi->dte_data = dte_data_tahiti;
+
+               switch (adev->pdev->device) {
+               case 0x6798:
+                       si_pi->dte_data.enable_dte_by_default = true;
+                       break;
+               case 0x6799:
+                       si_pi->dte_data = dte_data_new_zealand;
+                       break;
+               case 0x6790:
+               case 0x6791:
+               case 0x6792:
+               case 0x679E:
+                       si_pi->dte_data = dte_data_aruba_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x679B:
+                       si_pi->dte_data = dte_data_malta;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x679A:
+                       si_pi->dte_data = dte_data_tahiti_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               default:
+                       if (si_pi->dte_data.enable_dte_by_default)
+                               DRM_ERROR("DTE is not enabled!\n");
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_PITCAIRN) {
+               si_pi->cac_weights = cac_weights_pitcairn;
+               si_pi->lcac_config = lcac_pitcairn;
+               si_pi->cac_override = cac_override_pitcairn;
+               si_pi->powertune_data = &powertune_data_pitcairn;
+
+               switch (adev->pdev->device) {
+               case 0x6810:
+               case 0x6818:
+                       si_pi->dte_data = dte_data_curacao_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6819:
+               case 0x6811:
+                       si_pi->dte_data = dte_data_curacao_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6800:
+               case 0x6806:
+                       si_pi->dte_data = dte_data_neptune_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               default:
+                       si_pi->dte_data = dte_data_pitcairn;
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_VERDE) {
+               si_pi->lcac_config = lcac_cape_verde;
+               si_pi->cac_override = cac_override_cape_verde;
+               si_pi->powertune_data = &powertune_data_cape_verde;
+
+               switch (adev->pdev->device) {
+               case 0x683B:
+               case 0x683F:
+               case 0x6829:
+               case 0x6835:
+                       si_pi->cac_weights = cac_weights_cape_verde_pro;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x682C:
+                       si_pi->cac_weights = cac_weights_cape_verde_pro;
+                       si_pi->dte_data = dte_data_sun_xt;
+                       break;
+               case 0x6825:
+               case 0x6827:
+                       si_pi->cac_weights = cac_weights_heathrow;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x6824:
+               case 0x682D:
+                       si_pi->cac_weights = cac_weights_chelsea_xt;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x682F:
+                       si_pi->cac_weights = cac_weights_chelsea_pro;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               case 0x6820:
+                       si_pi->cac_weights = cac_weights_heathrow;
+                       si_pi->dte_data = dte_data_venus_xtx;
+                       break;
+               case 0x6821:
+                       si_pi->cac_weights = cac_weights_heathrow;
+                       si_pi->dte_data = dte_data_venus_xt;
+                       break;
+               case 0x6823:
+               case 0x682B:
+               case 0x6822:
+               case 0x682A:
+                       si_pi->cac_weights = cac_weights_chelsea_pro;
+                       si_pi->dte_data = dte_data_venus_pro;
+                       break;
+               default:
+                       si_pi->cac_weights = cac_weights_cape_verde;
+                       si_pi->dte_data = dte_data_cape_verde;
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_OLAND) {
+               si_pi->lcac_config = lcac_mars_pro;
+               si_pi->cac_override = cac_override_oland;
+               si_pi->powertune_data = &powertune_data_mars_pro;
+               si_pi->dte_data = dte_data_mars_pro;
+
+               switch (adev->pdev->device) {
+               case 0x6601:
+               case 0x6621:
+               case 0x6603:
+               case 0x6605:
+                       si_pi->cac_weights = cac_weights_mars_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6600:
+               case 0x6606:
+               case 0x6620:
+               case 0x6604:
+                       si_pi->cac_weights = cac_weights_mars_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6611:
+               case 0x6613:
+               case 0x6608:
+                       si_pi->cac_weights = cac_weights_oland_pro;
+                       update_dte_from_pl2 = true;
+                       break;
+               case 0x6610:
+                       si_pi->cac_weights = cac_weights_oland_xt;
+                       update_dte_from_pl2 = true;
+                       break;
+               default:
+                       si_pi->cac_weights = cac_weights_oland;
+                       si_pi->lcac_config = lcac_oland;
+                       si_pi->cac_override = cac_override_oland;
+                       si_pi->powertune_data = &powertune_data_oland;
+                       si_pi->dte_data = dte_data_oland;
+                       break;
+               }
+       } else if (adev->asic_type == CHIP_HAINAN) {
+               si_pi->cac_weights = cac_weights_hainan;
+               si_pi->lcac_config = lcac_oland;
+               si_pi->cac_override = cac_override_oland;
+               si_pi->powertune_data = &powertune_data_hainan;
+               si_pi->dte_data = dte_data_sun_xt;
+               update_dte_from_pl2 = true;
+       } else {
+               DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
+               return;
+       }
+
+       ni_pi->enable_power_containment = false;
+       ni_pi->enable_cac = false;
+       ni_pi->enable_sq_ramping = false;
+       si_pi->enable_dte = false;
+
+       if (si_pi->powertune_data->enable_powertune_by_default) {
+               ni_pi->enable_power_containment = true;
+               ni_pi->enable_cac = true;
+               if (si_pi->dte_data.enable_dte_by_default) {
+                       si_pi->enable_dte = true;
+                       if (update_dte_from_pl2)
+                               si_update_dte_from_pl2(adev, &si_pi->dte_data);
+
+               }
+               ni_pi->enable_sq_ramping = true;
+       }
+
+       ni_pi->driver_calculate_cac_leakage = true;
+       ni_pi->cac_configuration_required = true;
+
+       if (ni_pi->cac_configuration_required) {
+               ni_pi->support_cac_long_term_average = true;
+               si_pi->dyn_powertune_data.l2_lta_window_size =
+                       si_pi->powertune_data->l2_lta_window_size_default;
+               si_pi->dyn_powertune_data.lts_truncate =
+                       si_pi->powertune_data->lts_truncate_default;
+       } else {
+               ni_pi->support_cac_long_term_average = false;
+               si_pi->dyn_powertune_data.l2_lta_window_size = 0;
+               si_pi->dyn_powertune_data.lts_truncate = 0;
+       }
+
+       si_pi->dyn_powertune_data.disable_uvd_powertune = false;
+}
+
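+/* SI reports power to the SMC unscaled: the scaling factor is a
+ * constant 1 and si_scale_power_for_smc() is a pass-through.
+ */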
+static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
+{
+       return 1;
+}
+
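+/* Derive the CAC averaging window time from CG_CAC_CTRL: the two
+ * packed 16-bit window fields multiply out to a window size which is
+ * then normalized against the current xclk.
+ */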
+static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
+{
+       u32 xclk;
+       u32 wintime;
+       u32 cac_window;
+       u32 cac_window_size;
+
+       xclk = amdgpu_asic_get_xclk(adev);
+
+       if (xclk == 0)
+               return 0;
+
+       cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+       cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
+
+       wintime = (cac_window_size * 100) / xclk;
+
+       return wintime;
+}
+
+static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
+       return power_in_watts;
+}
+
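+/* Apply the user's percentage TDP adjustment (bounded by the OD limit)
+ * to both the TDP and near-TDP limits.  The result may be at most
+ * double the stock TDP, and the near-TDP limit must remain non-zero
+ * and no higher than the TDP limit.
+ */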
+static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
+                                           bool adjust_polarity,
+                                           u32 tdp_adjustment,
+                                           u32 *tdp_limit,
+                                           u32 *near_tdp_limit)
+{
+       u32 adjustment_delta, max_tdp_limit;
+
+       if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
+               return -EINVAL;
+
+       max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
+
+       if (adjust_polarity) {
+               *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+               *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
+       } else {
+               *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+               adjustment_delta  = adev->pm.dpm.tdp_limit - *tdp_limit;
+               if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
+                       *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
+               else
+                       *near_tdp_limit = 0;
+       }
+
+       if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
+               return -EINVAL;
+       if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
+               return -EINVAL;
+
+       return 0;
+}
+
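+/* Push the adjusted TDP, near-TDP and safe power limits into the SMC
+ * state table (big-endian, scaled by 1000), followed by the PAPM
+ * parameters when PPM is enabled.
+ */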
+static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
+                                     struct amdgpu_ps *amdgpu_state)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (ni_pi->enable_power_containment) {
+               SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+               PP_SIslands_PAPMParameters *papm_parm;
+               struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+               u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+               u32 tdp_limit;
+               u32 near_tdp_limit;
+               int ret;
+
+               if (scaling_factor == 0)
+                       return -EINVAL;
+
+               memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+               ret = si_calculate_adjusted_tdp_limits(adev,
+                                                      false, /* ??? */
+                                                      adev->pm.dpm.tdp_adjustment,
+                                                      &tdp_limit,
+                                                      &near_tdp_limit);
+               if (ret)
+                       return ret;
+
+               smc_table->dpm2Params.TDPLimit =
+                       cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
+               smc_table->dpm2Params.NearTDPLimit =
+                       cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
+               smc_table->dpm2Params.SafePowerLimit =
+                       cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+               ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                                 (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+                                                  offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
+                                                 (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
+                                                 sizeof(u32) * 3,
+                                                 si_pi->sram_end);
+               if (ret)
+                       return ret;
+
+               if (si_pi->enable_ppm) {
+                       papm_parm = &si_pi->papm_parm;
+                       memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
+                       papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
+                       papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
+                       papm_parm->dGPU_T_Warning = cpu_to_be32(95);
+                       papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
+                       papm_parm->PlatformPowerLimit = 0xffffffff;
+                       papm_parm->NearTDPLimitPAPM = 0xffffffff;
+
+                       ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
+                                                         (u8 *)papm_parm,
+                                                         sizeof(PP_SIslands_PAPMParameters),
+                                                         si_pi->sram_end);
+                       if (ret)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
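+/* Reduced variant of the above: refresh only the near-TDP and safe
+ * power limits from the pre-adjusted near-TDP value.
+ */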
+static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *amdgpu_state)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (ni_pi->enable_power_containment) {
+               SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+               u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+               int ret;
+
+               memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+               smc_table->dpm2Params.NearTDPLimit =
+                       cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
+               smc_table->dpm2Params.SafePowerLimit =
+                       cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+               ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                                 (si_pi->state_table_start +
+                                                  offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+                                                  offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
+                                                 (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
+                                                 sizeof(u32) * 2,
+                                                 si_pi->sram_end);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
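+/* Power efficiency ratio between two levels in 1/1024 units:
+ * 1024 * (Vcurr^2 / Vprev^2) * (1000 + margin) / 1000.  Returns 0 on
+ * zero input voltages or if the ratio would overflow 16 bits.
+ */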
+static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
+                                              const u16 prev_std_vddc,
+                                              const u16 curr_std_vddc)
+{
+       u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
+       u64 prev_vddc = (u64)prev_std_vddc;
+       u64 curr_vddc = (u64)curr_std_vddc;
+       u64 pwr_efficiency_ratio, n, d;
+
+       if ((prev_vddc == 0) || (curr_vddc == 0))
+               return 0;
+
+       n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
+       d = prev_vddc * prev_vddc;
+       pwr_efficiency_ratio = div64_u64(n, d);
+
+       if (pwr_efficiency_ratio > (u64)0xFFFF)
+               return 0;
+
+       return (u16)pwr_efficiency_ratio;
+}
+
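+/* States with active UVD clocks can opt out of powertune entirely. */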
+static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
+                                           struct amdgpu_ps *amdgpu_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
+           amdgpu_state->vclk && amdgpu_state->dclk)
+               return true;
+
+       return false;
+}
+
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *pi = adev->pm.dpm.priv;
+
+       return pi;
+}
+
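+/* Fill in the per-level DPM2 power containment parameters.  Level 0 is
+ * left unconstrained; every higher level gets a pulse-skip budget
+ * derived from its sclk step and a power efficiency ratio derived from
+ * the standardized voltages of the adjacent levels.
+ */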
+static int si_populate_power_containment_values(struct amdgpu_device *adev,
+                                               struct amdgpu_ps *amdgpu_state,
+                                               SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       SISLANDS_SMC_VOLTAGE_VALUE vddc;
+       u32 prev_sclk;
+       u32 max_sclk;
+       u32 min_sclk;
+       u16 prev_std_vddc;
+       u16 curr_std_vddc;
+       int i;
+       u16 pwr_efficiency_ratio;
+       u8 max_ps_percent;
+       bool disable_uvd_power_tune;
+       int ret;
+
+       if (!ni_pi->enable_power_containment)
+               return 0;
+
+       if (state->performance_level_count == 0)
+               return -EINVAL;
+
+       if (smc_state->levelCount != state->performance_level_count)
+               return -EINVAL;
+
+       disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
+
+       smc_state->levels[0].dpm2.MaxPS = 0;
+       smc_state->levels[0].dpm2.NearTDPDec = 0;
+       smc_state->levels[0].dpm2.AboveSafeInc = 0;
+       smc_state->levels[0].dpm2.BelowSafeInc = 0;
+       smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+       for (i = 1; i < state->performance_level_count; i++) {
+               prev_sclk = state->performance_levels[i-1].sclk;
+               max_sclk  = state->performance_levels[i].sclk;
+               if (i == 1)
+                       max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
+               else
+                       max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
+
+               if (prev_sclk > max_sclk)
+                       return -EINVAL;
+
+               if ((max_ps_percent == 0) ||
+                   (prev_sclk == max_sclk) ||
+                   disable_uvd_power_tune)
+                       min_sclk = max_sclk;
+               else if (i == 1)
+                       min_sclk = prev_sclk;
+               else
+                       min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+
+               if (min_sclk < state->performance_levels[0].sclk)
+                       min_sclk = state->performance_levels[0].sclk;
+
+               if (min_sclk == 0)
+                       return -EINVAL;
+
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               state->performance_levels[i-1].vddc, &vddc);
+               if (ret)
+                       return ret;
+
+               ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
+               if (ret)
+                       return ret;
+
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               state->performance_levels[i].vddc, &vddc);
+               if (ret)
+                       return ret;
+
+               ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
+               if (ret)
+                       return ret;
+
+               pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
+                                                                          prev_std_vddc, curr_std_vddc);
+
+               smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+               smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
+               smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
+               smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
+               smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
+       }
+
+       return 0;
+}
+
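+/* Program per-level SQ power throttling.  Ramping only engages on
+ * levels at or above the sclk threshold, and is disabled outright if
+ * any compile-time ramp parameter would overflow its register field;
+ * non-ramped levels get the throttle fields fully masked instead.
+ */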
+static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
+                                        struct amdgpu_ps *amdgpu_state,
+                                        SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       u32 sq_power_throttle, sq_power_throttle2;
+       bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+       int i;
+
+       if (state->performance_level_count == 0)
+               return -EINVAL;
+
+       if (smc_state->levelCount != state->performance_level_count)
+               return -EINVAL;
+
+       if (adev->pm.dpm.sq_ramping_threshold == 0)
+               return -EINVAL;
+
+       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+               enable_sq_ramping = false;
+
+       if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+               enable_sq_ramping = false;
+
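+       /*
+        * Levels at or above the ramping threshold get the tuned throttle
+        * parameters; all other levels program the full field masks, which
+        * presumably leaves SQ ramping inactive for that level.
+        */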
+       for (i = 0; i < state->performance_level_count; i++) {
+               sq_power_throttle = 0;
+               sq_power_throttle2 = 0;
+
+               if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
+                   enable_sq_ramping) {
+                       sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+                       sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+                       sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+                       sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+                       sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+               } else {
+                       sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+                       sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+               }
+
+               smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
+               smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+       }
+
+       return 0;
+}
+
+static int si_enable_power_containment(struct amdgpu_device *adev,
+                                      struct amdgpu_ps *amdgpu_new_state,
+                                      bool enable)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       PPSMC_Result smc_result;
+       int ret = 0;
+
+       if (ni_pi->enable_power_containment) {
+               if (enable) {
+                       if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
+                               if (smc_result != PPSMC_Result_OK) {
+                                       ret = -EINVAL;
+                                       ni_pi->pc_enabled = false;
+                               } else {
+                                       ni_pi->pc_enabled = true;
+                               }
+                       }
+               } else {
+                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
+                       if (smc_result != PPSMC_Result_OK)
+                               ret = -EINVAL;
+                       ni_pi->pc_enabled = false;
+               }
+       }
+
+       return ret;
+}
+
+static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret = 0;
+       struct si_dte_data *dte_data = &si_pi->dte_data;
+       Smc_SIslands_DTE_Configuration *dte_tables = NULL;
+       u32 table_size;
+       u8 tdep_count;
+       u32 i;
+
+       /* dte_data points at an embedded member of si_pi and is never NULL */
+       if (!si_pi->enable_dte)
+               return 0;
+
+       if (dte_data->k <= 0)
+               return -EINVAL;
+
+       dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
+       if (dte_tables == NULL) {
+               si_pi->enable_dte = false;
+               return -ENOMEM;
+       }
+
+       table_size = dte_data->k;
+
+       if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
+               table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
+
+       tdep_count = dte_data->tdep_count;
+       if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
+               tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
+
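+       /* the SMC consumes these tables big-endian, hence cpu_to_be32() */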
+       dte_tables->K = cpu_to_be32(table_size);
+       dte_tables->T0 = cpu_to_be32(dte_data->t0);
+       dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
+       dte_tables->WindowSize = dte_data->window_size;
+       dte_tables->temp_select = dte_data->temp_select;
+       dte_tables->DTE_mode = dte_data->dte_mode;
+       dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
+
+       if (tdep_count > 0)
+               table_size--;
+
+       for (i = 0; i < table_size; i++) {
+               dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
+               dte_tables->R[i]   = cpu_to_be32(dte_data->r[i]);
+       }
+
+       dte_tables->Tdep_count = tdep_count;
+
+       for (i = 0; i < (u32)tdep_count; i++) {
+               dte_tables->T_limits[i] = dte_data->t_limits[i];
+               dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
+               dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
+       }
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
+                                         (u8 *)dte_tables,
+                                         sizeof(Smc_SIslands_DTE_Configuration),
+                                         si_pi->sram_end);
+       kfree(dte_tables);
+
+       return ret;
+}
+
+static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
+                                         u16 *max, u16 *min)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct amdgpu_cac_leakage_table *table =
+               &adev->pm.dpm.dyn_state.cac_leakage_table;
+       u32 i;
+       u32 v0_loadline;
+
+       if (table == NULL)
+               return -EINVAL;
+
+       *max = 0;
+       *min = 0xFFFF;
+
+       for (i = 0; i < table->count; i++) {
+               if (table->entries[i].vddc > *max)
+                       *max = table->entries[i].vddc;
+               if (table->entries[i].vddc < *min)
+                       *min = table->entries[i].vddc;
+       }
+
+       if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
+               return -EINVAL;
+
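+       /* lower the LUT floor below the measured minimum by the load-line percentage */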
+       v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
+
+       if (v0_loadline > 0xFFFFUL)
+               return -EINVAL;
+
+       *min = (u16)v0_loadline;
+
+       if ((*min > *max) || (*max == 0) || (*min == 0))
+               return -EINVAL;
+
+       return 0;
+}
+
+static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
+{
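+       /* round up so the stepped LUT always covers the full [min, max] range */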
+       return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
+               SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+}
+
+static int si_init_dte_leakage_table(struct amdgpu_device *adev,
+                                    PP_SIslands_CacConfig *cac_tables,
+                                    u16 vddc_max, u16 vddc_min, u16 vddc_step,
+                                    u16 t0, u16 t_step)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 leakage;
+       unsigned int i, j;
+       s32 t;
+       u32 smc_leakage;
+       u32 scaling_factor;
+       u16 voltage;
+
+       scaling_factor = si_get_smc_power_scaling_factor(adev);
+
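+       /*
+        * Build the temperature x voltage leakage LUT.  Voltages are computed
+        * downward from vddc_max but stored mirrored, so the table ends up
+        * ordered from lowest to highest voltage.
+        */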
+       for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
+               t = (1000 * (i * t_step + t0));
+
+               for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+                       voltage = vddc_max - (vddc_step * j);
+
+                       si_calculate_leakage_for_v_and_t(adev,
+                                                        &si_pi->powertune_data->leakage_coefficients,
+                                                        voltage,
+                                                        t,
+                                                        si_pi->dyn_powertune_data.cac_leakage,
+                                                        &leakage);
+
+                       smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+                       if (smc_leakage > 0xFFFF)
+                               smc_leakage = 0xFFFF;
+
+                       cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+                               cpu_to_be16((u16)smc_leakage);
+               }
+       }
+       return 0;
+}
+
+static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
+                                           PP_SIslands_CacConfig *cac_tables,
+                                           u16 vddc_max, u16 vddc_min, u16 vddc_step)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 leakage;
+       unsigned int i, j;
+       u32 smc_leakage;
+       u32 scaling_factor;
+       u16 voltage;
+
+       scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+       for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+               voltage = vddc_max - (vddc_step * j);
+
+               si_calculate_leakage_for_v(adev,
+                                          &si_pi->powertune_data->leakage_coefficients,
+                                          si_pi->powertune_data->fixed_kt,
+                                          voltage,
+                                          si_pi->dyn_powertune_data.cac_leakage,
+                                          &leakage);
+
+               smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+               if (smc_leakage > 0xFFFF)
+                       smc_leakage = 0xFFFF;
+
+               for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+                       cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+                               cpu_to_be16((u16)smc_leakage);
+       }
+       return 0;
+}
+
+static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PP_SIslands_CacConfig *cac_tables = NULL;
+       u16 vddc_max, vddc_min, vddc_step;
+       u16 t0, t_step;
+       u32 load_line_slope, reg;
+       int ret = 0;
+       u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
+
+       if (!ni_pi->enable_cac)
+               return 0;
+
+       cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
+       if (!cac_tables)
+               return -ENOMEM;
+
+       reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
+       reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
+       WREG32(CG_CAC_CTRL, reg);
+
+       si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
+       si_pi->dyn_powertune_data.dc_pwr_value =
+               si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
+       si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
+       si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
+
+       si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
+
+       ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
+       if (ret)
+               goto done_free;
+
+       vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
+       vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
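+       /* leakage LUT temperature axis: starts at 60, steps by 4 (scaled x1000 by the builder) */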
+       t_step = 4;
+       t0 = 60;
+
+       if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
+               ret = si_init_dte_leakage_table(adev, cac_tables,
+                                               vddc_max, vddc_min, vddc_step,
+                                               t0, t_step);
+       else
+               ret = si_init_simplified_leakage_table(adev, cac_tables,
+                                                      vddc_max, vddc_min, vddc_step);
+       if (ret)
+               goto done_free;
+
+       load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
+
+       cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
+       cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
+       cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
+       cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
+       cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
+       cac_tables->R_LL = cpu_to_be32(load_line_slope);
+       cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
+       cac_tables->calculation_repeats = cpu_to_be32(2);
+       cac_tables->dc_cac = cpu_to_be32(0);
+       cac_tables->log2_PG_LKG_SCALE = 12;
+       cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
+       cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
+       cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
+                                         (u8 *)cac_tables,
+                                         sizeof(PP_SIslands_CacConfig),
+                                         si_pi->sram_end);
+
+       if (ret)
+               goto done_free;
+
+       ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
+
+done_free:
+       if (ret) {
+               ni_pi->enable_cac = false;
+               ni_pi->enable_power_containment = false;
+       }
+
+       kfree(cac_tables);
+
+       return ret;
+}
+
+static int si_program_cac_config_registers(struct amdgpu_device *adev,
+                                          const struct si_cac_config_reg *cac_config_regs)
+{
+       const struct si_cac_config_reg *config_regs = cac_config_regs;
+       u32 data = 0, offset;
+
+       if (!config_regs)
+               return -EINVAL;
+
+       while (config_regs->offset != 0xFFFFFFFF) {
+               switch (config_regs->type) {
+               case SISLANDS_CACCONFIG_CGIND:
+                       offset = SMC_CG_IND_START + config_regs->offset;
+                       if (offset < SMC_CG_IND_END)
+                               data = RREG32_SMC(offset);
+                       break;
+               default:
+                       data = RREG32(config_regs->offset);
+                       break;
+               }
+
+               data &= ~config_regs->mask;
+               data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+
+               switch (config_regs->type) {
+               case SISLANDS_CACCONFIG_CGIND:
+                       offset = SMC_CG_IND_START + config_regs->offset;
+                       if (offset < SMC_CG_IND_END)
+                               WREG32_SMC(offset, data);
+                       break;
+               default:
+                       WREG32(config_regs->offset, data);
+                       break;
+               }
+               config_regs++;
+       }
+       return 0;
+}
+
+static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret;
+
+       if (!ni_pi->enable_cac ||
+           !ni_pi->cac_configuration_required)
+               return 0;
+
+       ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
+       if (ret)
+               return ret;
+       ret = si_program_cac_config_registers(adev, si_pi->cac_override);
+       if (ret)
+               return ret;
+       ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int si_enable_smc_cac(struct amdgpu_device *adev,
+                            struct amdgpu_ps *amdgpu_new_state,
+                            bool enable)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PPSMC_Result smc_result;
+       int ret = 0;
+
+       if (ni_pi->enable_cac) {
+               if (enable) {
+                       if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+                               if (ni_pi->support_cac_long_term_average) {
+                                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
+                                       if (smc_result != PPSMC_Result_OK)
+                                               ni_pi->support_cac_long_term_average = false;
+                               }
+
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+                               if (smc_result != PPSMC_Result_OK) {
+                                       ret = -EINVAL;
+                                       ni_pi->cac_enabled = false;
+                               } else {
+                                       ni_pi->cac_enabled = true;
+                               }
+
+                               if (si_pi->enable_dte) {
+                                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+                                       if (smc_result != PPSMC_Result_OK)
+                                               ret = -EINVAL;
+                               }
+                       }
+               } else if (ni_pi->cac_enabled) {
+                       if (si_pi->enable_dte)
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+                       smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+
+                       ni_pi->cac_enabled = false;
+
+                       if (ni_pi->support_cac_long_term_average)
+                               smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
+               }
+       }
+       return ret;
+}
+
+static int si_init_smc_spll_table(struct amdgpu_device *adev)
+{
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
+       SISLANDS_SMC_SCLK_VALUE sclk_params;
+       u32 fb_div, p_div;
+       u32 clk_s, clk_v;
+       u32 sclk = 0;
+       int ret = 0;
+       u32 tmp;
+       int i;
+
+       if (si_pi->spll_table_start == 0)
+               return -EINVAL;
+
+       spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+       if (spll_table == NULL)
+               return -ENOMEM;
+
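+       /*
+        * Build a 256-entry table, stepping the requested sclk by 512 (driver
+        * clock units) per entry and packing the resulting PLL divider and
+        * spread-spectrum parameters into the SMC's fixed-width fields.
+        */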
+       for (i = 0; i < 256; i++) {
+               ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
+               if (ret)
+                       break;
+               p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+               fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+               clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+               clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+               fb_div &= ~0x00001FFF;
+               fb_div >>= 1;
+               clk_v >>= 6;
+
+               if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+                       ret = -EINVAL;
+               if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+                       ret = -EINVAL;
+               if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+                       ret = -EINVAL;
+               if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+                       ret = -EINVAL;
+
+               if (ret)
+                       break;
+
+               tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+                       ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+               spll_table->freq[i] = cpu_to_be32(tmp);
+
+               tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+                       ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+               spll_table->ss[i] = cpu_to_be32(tmp);
+
+               sclk += 512;
+       }
+
+       if (!ret)
+               ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
+                                                 (u8 *)spll_table,
+                                                 sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+                                                 si_pi->sram_end);
+
+       if (ret)
+               ni_pi->enable_power_containment = false;
+
+       kfree(spll_table);
+
+       return ret;
+}
+
+struct si_dpm_quirk {
+       u32 chip_vendor;
+       u32 chip_device;
+       u32 subsys_vendor;
+       u32 subsys_device;
+       u32 max_sclk;
+       u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+       /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+       { 0, 0, 0, 0 },
+};
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
+                                                  u16 vce_voltage)
+{
+       u16 highest_leakage = 0;
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int i;
+
+       for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+               if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+                       highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+       }
+
+       if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+               return highest_leakage;
+
+       return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
+                                   u32 evclk, u32 ecclk, u16 *voltage)
+{
+       u32 i;
+       int ret = -EINVAL;
+       struct amdgpu_vce_clock_voltage_dependency_table *table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+       /* table points at an embedded member, so only its count needs checking */
+       if (((evclk == 0) && (ecclk == 0)) ||
+           (table->count == 0)) {
+               *voltage = 0;
+               return 0;
+       }
+
+       for (i = 0; i < table->count; i++) {
+               if ((evclk <= table->entries[i].evclk) &&
+                   (ecclk <= table->entries[i].ecclk)) {
+                       *voltage = table->entries[i].v;
+                       ret = 0;
+                       break;
+               }
+       }
+
+       /* if no match return the highest voltage */
+       if (ret)
+               *voltage = table->entries[table->count - 1].v;
+
+       *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
+
+       return ret;
+}
+
+static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+       u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+       /* we never hit the non-gddr5 limit so disable it */
+       u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+
+       return vblank_time < switch_limit;
+}
+
+static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+                               u32 arb_freq_src, u32 arb_freq_dest)
+{
+       u32 mc_arb_dram_timing;
+       u32 mc_arb_dram_timing2;
+       u32 burst_time;
+       u32 mc_cg_config;
+
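+       /*
+        * Copy the DRAM timing and burst-time settings from the source arb
+        * set to the destination set, then tell the arbiter to switch over.
+        */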
+       switch (arb_freq_src) {
+       case MC_CG_ARB_FREQ_F0:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
+               break;
+       case MC_CG_ARB_FREQ_F1:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
+               break;
+       case MC_CG_ARB_FREQ_F2:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
+               break;
+       case MC_CG_ARB_FREQ_F3:
+               mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
+               mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
+               burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (arb_freq_dest) {
+       case MC_CG_ARB_FREQ_F0:
+               WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
+               break;
+       case MC_CG_ARB_FREQ_F1:
+               WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
+               break;
+       case MC_CG_ARB_FREQ_F2:
+               WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
+               break;
+       case MC_CG_ARB_FREQ_F3:
+               WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
+               WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
+               WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
+       WREG32(MC_CG_CONFIG, mc_cg_config);
+       WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
+
+       return 0;
+}
+
+static void ni_update_current_ps(struct amdgpu_device *adev,
+                         struct amdgpu_ps *rps)
+{
+       struct si_ps *new_ps = si_get_ps(rps);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+       eg_pi->current_rps = *rps;
+       ni_pi->current_ps = *new_ps;
+       eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+}
+
+static void ni_update_requested_ps(struct amdgpu_device *adev,
+                           struct amdgpu_ps *rps)
+{
+       struct si_ps *new_ps = si_get_ps(rps);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+       eg_pi->requested_rps = *rps;
+       ni_pi->requested_ps = *new_ps;
+       eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+}
+
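+/*
+ * Raise the UVD clocks before changing the engine clock only when the new
+ * state's top sclk is lower than the current one; the "after" variant below
+ * handles the opposite ordering.
+ */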
+static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
+                                          struct amdgpu_ps *new_ps,
+                                          struct amdgpu_ps *old_ps)
+{
+       struct si_ps *new_state = si_get_ps(new_ps);
+       struct si_ps *current_state = si_get_ps(old_ps);
+
+       if ((new_ps->vclk == old_ps->vclk) &&
+           (new_ps->dclk == old_ps->dclk))
+               return;
+
+       if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
+           current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+               return;
+
+       amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
+                                         struct amdgpu_ps *new_ps,
+                                         struct amdgpu_ps *old_ps)
+{
+       struct si_ps *new_state = si_get_ps(new_ps);
+       struct si_ps *current_state = si_get_ps(old_ps);
+
+       if ((new_ps->vclk == old_ps->vclk) &&
+           (new_ps->dclk == old_ps->dclk))
+               return;
+
+       if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
+           current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+               return;
+
+       amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
+{
+       unsigned int i;
+
+       for (i = 0; i < table->count; i++)
+               if (voltage <= table->entries[i].value)
+                       return table->entries[i].value;
+
+       return table->entries[table->count - 1].value;
+}
+
+static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
+                               u32 max_clock, u32 requested_clock)
+{
+       unsigned int i;
+
+       if ((clocks == NULL) || (clocks->count == 0))
+               return (requested_clock < max_clock) ? requested_clock : max_clock;
+
+       for (i = 0; i < clocks->count; i++) {
+               if (clocks->values[i] >= requested_clock)
+                       return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+       }
+
+       return (clocks->values[clocks->count - 1] < max_clock) ?
+               clocks->values[clocks->count - 1] : max_clock;
+}
+
+static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
+                             u32 max_mclk, u32 requested_mclk)
+{
+       return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
+                                   max_mclk, requested_mclk);
+}
+
+static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
+                             u32 max_sclk, u32 requested_sclk)
+{
+       return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
+                                   max_sclk, requested_sclk);
+}
+
+static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
+                                                           u32 *max_clock)
+{
+       u32 i, clock = 0;
+
+       if ((table == NULL) || (table->count == 0)) {
+               *max_clock = clock;
+               return;
+       }
+
+       for (i = 0; i < table->count; i++) {
+               if (clock < table->entries[i].clk)
+                       clock = table->entries[i].clk;
+       }
+       *max_clock = clock;
+}
+
+static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
+                                              u32 clock, u16 max_voltage, u16 *voltage)
+{
+       u32 i;
+
+       if ((table == NULL) || (table->count == 0))
+               return;
+
+       for (i = 0; i < table->count; i++) {
+               if (clock <= table->entries[i].clk) {
+                       if (*voltage < table->entries[i].v)
+                               *voltage = (u16)((table->entries[i].v < max_voltage) ?
+                                          table->entries[i].v : max_voltage);
+                       return;
+               }
+       }
+
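+       /* requested clock is above every table entry: require at least the max voltage */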
+       *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+}
+
+static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
+                                         const struct amdgpu_clock_and_voltage_limits *max_limits,
+                                         struct rv7xx_pl *pl)
+{
+       if ((pl->mclk == 0) || (pl->sclk == 0))
+               return;
+
+       if (pl->mclk == pl->sclk)
+               return;
+
+       if (pl->mclk > pl->sclk) {
+               if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
+                       pl->sclk = btc_get_valid_sclk(adev,
+                                                     max_limits->sclk,
+                                                     (pl->mclk +
+                                                     (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+                                                     adev->pm.dpm.dyn_state.mclk_sclk_ratio);
+       } else {
+               if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
+                       pl->mclk = btc_get_valid_mclk(adev,
+                                                     max_limits->mclk,
+                                                     pl->sclk -
+                                                     adev->pm.dpm.dyn_state.sclk_mclk_delta);
+       }
+}
+
+static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
+                                         u16 max_vddc, u16 max_vddci,
+                                         u16 *vddc, u16 *vddci)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       u16 new_voltage;
+
+       if ((*vddc == 0) || (*vddci == 0))
+               return;
+
+       if (*vddc > *vddci) {
+               if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+                       new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+                                                      (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+                       *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+               }
+       } else {
+               if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+                       new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+                                                      (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+                       *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+               }
+       }
+}
+
+static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
+                                              u32 sys_mask,
+                                              enum amdgpu_pcie_gen asic_gen,
+                                              enum amdgpu_pcie_gen default_gen)
+{
+       switch (asic_gen) {
+       case AMDGPU_PCIE_GEN1:
+               return AMDGPU_PCIE_GEN1;
+       case AMDGPU_PCIE_GEN2:
+               return AMDGPU_PCIE_GEN2;
+       case AMDGPU_PCIE_GEN3:
+               return AMDGPU_PCIE_GEN3;
+       default:
+               if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+                       return AMDGPU_PCIE_GEN3;
+               else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+                       return AMDGPU_PCIE_GEN2;
+               else
+                       return AMDGPU_PCIE_GEN1;
+       }
+}
+
+static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+                           u32 *p, u32 *u)
+{
+       u32 b_c = 0;
+       u32 i_c;
+       u32 tmp;
+
+       i_c = (i * r_c) / 100;
+       tmp = i_c >> p_b;
+
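+       /* b_c ends up as the bit length of (i_c >> p_b) */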
+       while (tmp) {
+               b_c++;
+               tmp >>= 1;
+       }
+
+       *u = (b_c + 1) / 2;
+       *p = i_c / (1 << (2 * (*u)));
+}
+
+static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+       u32 k, a, ah, al;
+       u32 t1;
+
+       if ((fl == 0) || (fh == 0) || (fl > fh))
+               return -EINVAL;
+
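+       /*
+        * Fixed-point arithmetic: k is the high/low clock ratio in percent,
+        * and the +5/+5000 terms round the scaled intermediate results.
+        */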
+       k = (100 * fh) / fl;
+       t1 = (t * (k - 100));
+       a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+       a = (a + 5) / 10;
+       ah = ((a * t) + 5000) / 10000;
+       al = a - ah;
+
+       *th = t - ah;
+       *tl = t + al;
+
+       return 0;
+}
+
+static bool r600_is_uvd_state(u32 class, u32 class2)
+{
+       if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+               return true;
+       if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+               return true;
+       if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+               return true;
+       if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+               return true;
+       if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+               return true;
+       return false;
+}
+
+static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
+{
+       return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static void rv770_get_max_vddc(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       u16 vddc;
+
+       if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
+               pi->max_vddc = 0;
+       else
+               pi->max_vddc = vddc;
+}
+
+static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct amdgpu_atom_ss ss;
+
+       pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                      ASIC_INTERNAL_ENGINE_SS, 0);
+       pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                      ASIC_INTERNAL_MEMORY_SS, 0);
+
+       if (pi->sclk_ss || pi->mclk_ss)
+               pi->dynamic_ss = true;
+       else
+               pi->dynamic_ss = false;
+}
+
+static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+                                       struct amdgpu_ps *rps)
+{
+       struct si_ps *ps = si_get_ps(rps);
+       struct amdgpu_clock_and_voltage_limits *max_limits;
+       bool disable_mclk_switching = false;
+       bool disable_sclk_switching = false;
+       u32 mclk, sclk;
+       u16 vddc, vddci, min_vce_voltage = 0;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+       u32 max_sclk = 0, max_mclk = 0;
+       int i;
+       struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+       /* Apply dpm quirks */
+       while (p && p->chip_device != 0) {
+               if (adev->pdev->vendor == p->chip_vendor &&
+                   adev->pdev->device == p->chip_device &&
+                   adev->pdev->subsystem_vendor == p->subsys_vendor &&
+                   adev->pdev->subsystem_device == p->subsys_device) {
+                       max_sclk = p->max_sclk;
+                       max_mclk = p->max_mclk;
+                       break;
+               }
+               ++p;
+       }
+
+       if (rps->vce_active) {
+               rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+               rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+               si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
+                                        &min_vce_voltage);
+       } else {
+               rps->evclk = 0;
+               rps->ecclk = 0;
+       }
+
+       if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+           si_dpm_vblank_too_short(adev))
+               disable_mclk_switching = true;
+
+       if (rps->vclk || rps->dclk) {
+               disable_mclk_switching = true;
+               disable_sclk_switching = true;
+       }
+
+       if (adev->pm.dpm.ac_power)
+               max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+       else
+               max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
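+       /* vddc must not decrease as the level index rises; propagate caps downward */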
+       for (i = ps->performance_level_count - 2; i >= 0; i--) {
+               if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+                       ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+       }
+       if (!adev->pm.dpm.ac_power) {
+               for (i = 0; i < ps->performance_level_count; i++) {
+                       if (ps->performance_levels[i].mclk > max_limits->mclk)
+                               ps->performance_levels[i].mclk = max_limits->mclk;
+                       if (ps->performance_levels[i].sclk > max_limits->sclk)
+                               ps->performance_levels[i].sclk = max_limits->sclk;
+                       if (ps->performance_levels[i].vddc > max_limits->vddc)
+                               ps->performance_levels[i].vddc = max_limits->vddc;
+                       if (ps->performance_levels[i].vddci > max_limits->vddci)
+                               ps->performance_levels[i].vddci = max_limits->vddci;
+               }
+       }
+
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+               if (max_mclk) {
+                       if (ps->performance_levels[i].mclk > max_mclk)
+                               ps->performance_levels[i].mclk = max_mclk;
+               }
+               if (max_sclk) {
+                       if (ps->performance_levels[i].sclk > max_sclk)
+                               ps->performance_levels[i].sclk = max_sclk;
+               }
+       }
+
+       /* XXX validate the min clocks required for display */
+
+       if (disable_mclk_switching) {
+               mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
+               vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
+       } else {
+               mclk = ps->performance_levels[0].mclk;
+               vddci = ps->performance_levels[0].vddci;
+       }
+
+       if (disable_sclk_switching) {
+               sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+               vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+       } else {
+               sclk = ps->performance_levels[0].sclk;
+               vddc = ps->performance_levels[0].vddc;
+       }
+
+       if (rps->vce_active) {
+               if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+                       sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+               if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+                       mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+       }
+
+       /* adjusted low state */
+       ps->performance_levels[0].sclk = sclk;
+       ps->performance_levels[0].mclk = mclk;
+       ps->performance_levels[0].vddc = vddc;
+       ps->performance_levels[0].vddci = vddci;
+
+       if (disable_sclk_switching) {
+               sclk = ps->performance_levels[0].sclk;
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (sclk < ps->performance_levels[i].sclk)
+                               sclk = ps->performance_levels[i].sclk;
+               }
+               for (i = 0; i < ps->performance_level_count; i++) {
+                       ps->performance_levels[i].sclk = sclk;
+                       ps->performance_levels[i].vddc = vddc;
+               }
+       } else {
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+                               ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+                       if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+                               ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+               }
+       }
+
+       if (disable_mclk_switching) {
+               mclk = ps->performance_levels[0].mclk;
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (mclk < ps->performance_levels[i].mclk)
+                               mclk = ps->performance_levels[i].mclk;
+               }
+               for (i = 0; i < ps->performance_level_count; i++) {
+                       ps->performance_levels[i].mclk = mclk;
+                       ps->performance_levels[i].vddci = vddci;
+               }
+       } else {
+               for (i = 1; i < ps->performance_level_count; i++) {
+                       if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+                               ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+                       if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+                               ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+               }
+       }
+
+       for (i = 0; i < ps->performance_level_count; i++)
+               btc_adjust_clock_combinations(adev, max_limits,
+                                             &ps->performance_levels[i]);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (ps->performance_levels[i].vddc < min_vce_voltage)
+                       ps->performance_levels[i].vddc = min_vce_voltage;
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                  ps->performance_levels[i].sclk,
+                                                  max_limits->vddc,  &ps->performance_levels[i].vddc);
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                  ps->performance_levels[i].mclk,
+                                                  max_limits->vddci, &ps->performance_levels[i].vddci);
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                  ps->performance_levels[i].mclk,
+                                                  max_limits->vddc,  &ps->performance_levels[i].vddc);
+               btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+                                                  adev->clock.current_dispclk,
+                                                  max_limits->vddc,  &ps->performance_levels[i].vddc);
+       }
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               btc_apply_voltage_delta_rules(adev,
+                                             max_limits->vddc, max_limits->vddci,
+                                             &ps->performance_levels[i].vddc,
+                                             &ps->performance_levels[i].vddci);
+       }
+
+       ps->dc_compatible = true;
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+                       ps->dc_compatible = false;
+       }
+}
+
+#if 0
+static int si_read_smc_soft_register(struct amdgpu_device *adev,
+                                    u16 reg_offset, u32 *value)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       return amdgpu_si_read_smc_sram_dword(adev,
+                                            si_pi->soft_regs_start + reg_offset, value,
+                                            si_pi->sram_end);
+}
+#endif
+
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+                                     u16 reg_offset, u32 value)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       return amdgpu_si_write_smc_sram_dword(adev,
+                                             si_pi->soft_regs_start + reg_offset,
+                                             value, si_pi->sram_end);
+}
+
+static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
+{
+       bool ret = false;
+       u32 tmp, width, row, column, bank, density;
+       bool is_memory_gddr5, is_special;
+
+       tmp = RREG32(MC_SEQ_MISC0);
+       is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
+       is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
+               && (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
+
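+       /*
+        * Decode the memory geometry from the MC registers; the density test
+        * below (0x400) singles out the 1GB configuration this quirk targets.
+        */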
+       WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
+       width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
+
+       tmp = RREG32(MC_ARB_RAMCFG);
+       row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
+       column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
+       bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+
+       density = (1 << (row + column - 20 + bank)) * width;
+
+       if ((adev->pdev->device == 0x6819) &&
+           is_memory_gddr5 && is_special && (density == 0x400))
+               ret = true;
+
+       return ret;
+}
+
+static void si_get_leakage_vddc(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u16 vddc, count = 0;
+       int i, ret;
+
+       for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
+               ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
+
+               if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
+                       si_pi->leakage_voltage.entries[count].voltage = vddc;
+                       si_pi->leakage_voltage.entries[count].leakage_index =
+                               SISLANDS_LEAKAGE_INDEX0 + i;
+                       count++;
+               }
+       }
+       si_pi->leakage_voltage.count = count;
+}
+
+static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
+                                                    u32 index, u16 *leakage_voltage)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int i;
+
+       if (leakage_voltage == NULL)
+               return -EINVAL;
+
+       if ((index & 0xff00) != 0xff00)
+               return -EINVAL;
+
+       if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
+               return -EINVAL;
+
+       if (index < SISLANDS_LEAKAGE_INDEX0)
+               return -EINVAL;
+
+       for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+               if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
+                       *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
+                       return 0;
+               }
+       }
+       return -EAGAIN;
+}
+
+static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       bool want_thermal_protection;
+       enum amdgpu_dpm_event_src dpm_event_src;
+
+       switch (sources) {
+       case 0:
+       default:
+               want_thermal_protection = false;
+               break;
+       case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+               want_thermal_protection = true;
+               dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+               break;
+       case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+               want_thermal_protection = true;
+               dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+               break;
+       case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+             (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+               want_thermal_protection = true;
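+               /* sic: "DIGIAL" is the enum's actual spelling in the dpm headers */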
+               dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+               break;
+       }
+
+       if (want_thermal_protection) {
+               WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+               if (pi->thermal_protection)
+                       WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+       } else {
+               WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+       }
+}
+
+static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
+                                          enum amdgpu_dpm_auto_throttle_src source,
+                                          bool enable)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       if (enable) {
+               if (!(pi->active_auto_throttle_sources & (1 << source))) {
+                       pi->active_auto_throttle_sources |= 1 << source;
+                       si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+               }
+       } else {
+               if (pi->active_auto_throttle_sources & (1 << source)) {
+                       pi->active_auto_throttle_sources &= ~(1 << source);
+                       si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+               }
+       }
+}
+
+static void si_start_dpm(struct amdgpu_device *adev)
+{
+       WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_stop_dpm(struct amdgpu_device *adev)
+{
+       WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+       if (enable)
+               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+       else
+               WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+#if 0
+static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
+                                              u32 thermal_level)
+{
+       PPSMC_Result ret;
+
+       if (thermal_level == 0) {
+               ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+               if (ret == PPSMC_Result_OK)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
+{
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
+}
+#endif
+
+#if 0
+static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
+{
+       if (ac_power)
+               return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+                       0 : -EINVAL;
+
+       return 0;
+}
+#endif
+
+static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+                                                     PPSMC_Msg msg, u32 parameter)
+{
+       WREG32(SMC_SCRATCH0, parameter);
+       return amdgpu_si_send_msg_to_smc(adev, msg);
+}
+
+static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
+{
+       if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+               return -EINVAL;
+
+       return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+                                  enum amdgpu_dpm_forced_level level)
+{
+       struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
+       struct si_ps *ps = si_get_ps(rps);
+       u32 levels = ps->performance_level_count;
+
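+       /*
+        * SMC message semantics inferred from usage: HIGH enables every level
+        * and forces one, LOW clears the forcing and restricts the enabled
+        * set to the lowest level, AUTO re-enables all levels unforced.
+        */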
+       if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+                       return -EINVAL;
+
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+                       return -EINVAL;
+       } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+                       return -EINVAL;
+
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+                       return -EINVAL;
+       } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+                       return -EINVAL;
+
+               if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+                       return -EINVAL;
+       }
+
+       adev->pm.dpm.forced_level = level;
+
+       return 0;
+}
+
+#if 0
+static int si_set_boot_state(struct amdgpu_device *adev)
+{
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+#endif
+
+static int si_set_sw_state(struct amdgpu_device *adev)
+{
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_halt_smc(struct amdgpu_device *adev)
+{
+       if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+               return -EINVAL;
+
+       return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_resume_smc(struct amdgpu_device *adev)
+{
+       if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
+               return -EINVAL;
+
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static void si_dpm_start_smc(struct amdgpu_device *adev)
+{
+       amdgpu_si_program_jump_on_start(adev);
+       amdgpu_si_start_smc(adev);
+       amdgpu_si_smc_clock(adev, true);
+}
+
+static void si_dpm_stop_smc(struct amdgpu_device *adev)
+{
+       amdgpu_si_reset_smc(adev);
+       amdgpu_si_smc_clock(adev, false);
+}
+
+static int si_process_firmware_header(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       int ret;
+
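+       /* Pull the offset of each table out of the SMC firmware header. */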
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->state_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->soft_regs_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->mc_reg_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->fan_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->arb_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->cac_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->dte_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->spll_table_start = tmp;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev,
+                                           SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+                                           SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
+       si_pi->papm_cfg_table_start = tmp;
+
+       return ret;
+}
+
+static void si_read_clock_registers(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+       si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+       si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+       si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+       si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+       si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+       si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+       si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+       si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+       si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+       si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+       si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+       si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+       si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+       si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void si_enable_thermal_protection(struct amdgpu_device *adev,
+                                         bool enable)
+{
+       if (enable)
+               WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+       else
+               WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+static void si_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+       WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+#if 0
+static int si_enter_ulp_state(struct amdgpu_device *adev)
+{
+       WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+       udelay(25000);
+
+       return 0;
+}
+
+static int si_exit_ulp_state(struct amdgpu_device *adev)
+{
+       int i;
+
+       WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+       udelay(7000);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (RREG32(SMC_RESP_0) == 1)
+                       break;
+               udelay(1000);
+       }
+
+       return 0;
+}
+#endif
+
+static int si_notify_smc_display_change(struct amdgpu_device *adev,
+                                        bool has_display)
+{
+       PPSMC_Msg msg = has_display ?
+               PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+       return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static void si_program_response_times(struct amdgpu_device *adev)
+{
+       u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+       u32 vddc_dly, acpi_dly, vbi_dly;
+       u32 reference_clock;
+
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+       voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
+       backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
+
+       if (voltage_response_time == 0)
+               voltage_response_time = 1000;
+
+       acpi_delay_time = 15000;
+       vbi_time_out = 100000;
+
+       reference_clock = amdgpu_asic_get_xclk(adev);
+
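+       /* xclk is in 10 kHz units, so cycles = time_in_us * xclk / 100. */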
+       vddc_dly = (voltage_response_time * reference_clock) / 100;
+       acpi_dly = (acpi_delay_time * reference_clock) / 100;
+       vbi_dly  = (vbi_time_out * reference_clock) / 100;
+
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+}
+
+static void si_program_ds_registers(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       u32 tmp;
+
+       /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
+       if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
+               tmp = 0x10;
+       else
+               tmp = 0x1;
+
+       if (eg_pi->sclk_deep_sleep) {
+               WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
+               WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
+                        ~AUTOSCALE_ON_SS_CLEAR);
+       }
+}
+
+static void si_program_display_gap(struct amdgpu_device *adev)
+{
+       u32 tmp, pipe;
+       int i;
+
+       tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+       if (adev->pm.dpm.new_active_crtc_count > 0)
+               tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+       else
+               tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+       if (adev->pm.dpm.new_active_crtc_count > 1)
+               tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+       else
+               tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+       WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+       tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+       pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+       if ((adev->pm.dpm.new_active_crtc_count > 0) &&
+           (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+               /* find the first active crtc */
+               for (i = 0; i < adev->mode_info.num_crtc; i++) {
+                       if (adev->pm.dpm.new_active_crtcs & (1 << i))
+                               break;
+               }
+               if (i == adev->mode_info.num_crtc)
+                       pipe = 0;
+               else
+                       pipe = i;
+
+               tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+               tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+               WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+       }
+
+       /* Setting this to false forces the performance state to low when the
+        * crtcs are disabled.  This can be a problem on PowerXpress systems,
+        * or when the card is used for offscreen rendering or compute with no
+        * crtcs enabled.
+        */
+       si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+}
+
+static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       if (enable) {
+               if (pi->sclk_ss)
+                       WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+       } else {
+               WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+               WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+       }
+}
+
+static void si_setup_bsp(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       u32 xclk = amdgpu_asic_get_xclk(adev);
+
+       r600_calculate_u_and_p(pi->asi,
+                              xclk,
+                              16,
+                              &pi->bsp,
+                              &pi->bsu);
+
+       r600_calculate_u_and_p(pi->pasi,
+                              xclk,
+                              16,
+                              &pi->pbsp,
+                              &pi->pbsu);
+
+       pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+       pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+       WREG32(CG_BSP, pi->dsp);
+}
+
+static void si_program_git(struct amdgpu_device *adev)
+{
+       WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+static void si_program_tp(struct amdgpu_device *adev)
+{
+       int i;
+       enum r600_td td = R600_TD_DFLT;
+
+       for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+               WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+       if (td == R600_TD_AUTO)
+               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+       else
+               WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+       if (td == R600_TD_UP)
+               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+       if (td == R600_TD_DOWN)
+               WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+static void si_program_tpp(struct amdgpu_device *adev)
+{
+       WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+static void si_program_sstp(struct amdgpu_device *adev)
+{
+       WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void si_enable_display_gap(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+       tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+       tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+               DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+       tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+       tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+               DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+       WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void si_program_vc(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       WREG32(CG_FTV, pi->vrc);
+}
+
+static void si_clear_vc(struct amdgpu_device *adev)
+{
+       WREG32(CG_FTV, 0);
+}
+
+static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+       u8 mc_para_index;
+
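+       /* memory_clock is in 10 kHz units: <100 MHz -> 0, >=800 MHz -> 0x0f,
+        * one step per 50 MHz in between.
+        */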
+       if (memory_clock < 10000)
+               mc_para_index = 0;
+       else if (memory_clock >= 80000)
+               mc_para_index = 0x0f;
+       else
+               mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+       return mc_para_index;
+}
+
+static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+       u8 mc_para_index;
+
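+       /* Same 10 kHz units: strobe-mode bins are 25 MHz wide, normal bins 50 MHz. */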
+       if (strobe_mode) {
+               if (memory_clock < 12500)
+                       mc_para_index = 0x00;
+               else if (memory_clock > 47500)
+                       mc_para_index = 0x0f;
+               else
+                       mc_para_index = (u8)((memory_clock - 10000) / 2500);
+       } else {
+               if (memory_clock < 65000)
+                       mc_para_index = 0x00;
+               else if (memory_clock > 135000)
+                       mc_para_index = 0x0f;
+               else
+                       mc_para_index = (u8)((memory_clock - 60000) / 5000);
+       }
+       return mc_para_index;
+}
+
+static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       bool strobe_mode = false;
+       u8 result = 0;
+
+       if (mclk <= pi->mclk_strobe_mode_threshold)
+               strobe_mode = true;
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+               result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
+       else
+               result = si_get_ddr3_mclk_frequency_ratio(mclk);
+
+       if (strobe_mode)
+               result |= SISLANDS_SMC_STROBE_ENABLE;
+
+       return result;
+}
+
+static int si_upload_firmware(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
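+       /* Hold the SMC in reset with its clock stopped while the ucode loads. */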
+       amdgpu_si_reset_smc(adev);
+       amdgpu_si_smc_clock(adev, false);
+
+       return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
+}
+
+static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
+                                             const struct atom_voltage_table *table,
+                                             const struct amdgpu_phase_shedding_limits_table *limits)
+{
+       u32 data, num_bits, num_levels;
+
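+       /* The GPIO mask encodes the selectable phase levels: a valid table
+        * has one voltage entry per level (2^bits) and one limits entry per
+        * transition between levels.
+        */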
+       if ((table == NULL) || (limits == NULL))
+               return false;
+
+       data = table->mask_low;
+
+       num_bits = hweight32(data);
+
+       if (num_bits == 0)
+               return false;
+
+       num_levels = (1 << num_bits);
+
+       if (table->count != num_levels)
+               return false;
+
+       if (limits->count != (num_levels - 1))
+               return false;
+
+       return true;
+}
+
+static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+                                             u32 max_voltage_steps,
+                                             struct atom_voltage_table *voltage_table)
+{
+       unsigned int i, diff;
+
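+       /* Entries are ordered from lowest to highest voltage, so dropping the
+        * first diff entries keeps the highest max_voltage_steps steps.
+        */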
+       if (voltage_table->count <= max_voltage_steps)
+               return;
+
+       diff = voltage_table->count - max_voltage_steps;
+
+       for (i = 0; i < max_voltage_steps; i++)
+               voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+       voltage_table->count = max_voltage_steps;
+}
+
+static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
+                                    struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+                                    struct atom_voltage_table *voltage_table)
+{
+       u32 i;
+
+       if (voltage_dependency_table == NULL)
+               return -EINVAL;
+
+       voltage_table->mask_low = 0;
+       voltage_table->phase_delay = 0;
+
+       voltage_table->count = voltage_dependency_table->count;
+       for (i = 0; i < voltage_table->count; i++) {
+               voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+               voltage_table->entries[i].smio_low = 0;
+       }
+
+       return 0;
+}
+
+static int si_construct_voltage_tables(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret;
+
+       if (pi->voltage_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+                                                   VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+               if (ret)
+                       return ret;
+
+               if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+                       si_trim_voltage_table_to_fit_state_table(adev,
+                                                                SISLANDS_MAX_NO_VREG_STEPS,
+                                                                &eg_pi->vddc_voltage_table);
+       } else if (si_pi->voltage_control_svi2) {
+               ret = si_get_svi2_voltage_table(adev,
+                                               &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                               &eg_pi->vddc_voltage_table);
+               if (ret)
+                       return ret;
+       } else {
+               return -EINVAL;
+       }
+
+       if (eg_pi->vddci_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+                                                   VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
+               if (ret)
+                       return ret;
+
+               if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+                       si_trim_voltage_table_to_fit_state_table(adev,
+                                                                SISLANDS_MAX_NO_VREG_STEPS,
+                                                                &eg_pi->vddci_voltage_table);
+       }
+       if (si_pi->vddci_control_svi2) {
+               ret = si_get_svi2_voltage_table(adev,
+                                               &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                               &eg_pi->vddci_voltage_table);
+               if (ret)
+                       return ret;
+       }
+
+       if (pi->mvdd_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+                                                   VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
+
+               if (ret) {
+                       pi->mvdd_control = false;
+                       return ret;
+               }
+
+               if (si_pi->mvdd_voltage_table.count == 0) {
+                       pi->mvdd_control = false;
+                       return -EINVAL;
+               }
+
+               if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+                       si_trim_voltage_table_to_fit_state_table(adev,
+                                                                SISLANDS_MAX_NO_VREG_STEPS,
+                                                                &si_pi->mvdd_voltage_table);
+       }
+
+       if (si_pi->vddc_phase_shed_control) {
+               ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+                                                   VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
+               if (ret)
+                       si_pi->vddc_phase_shed_control = false;
+
+               if ((si_pi->vddc_phase_shed_table.count == 0) ||
+                   (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
+                       si_pi->vddc_phase_shed_control = false;
+       }
+
+       return 0;
+}
+
+static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
+                                         const struct atom_voltage_table *voltage_table,
+                                         SISLANDS_SMC_STATETABLE *table)
+{
+       unsigned int i;
+
+       for (i = 0; i < voltage_table->count; i++)
+               table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+}
+
+static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
+                                         SISLANDS_SMC_STATETABLE *table)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u8 i;
+
+       if (si_pi->voltage_control_svi2) {
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+                       si_pi->svc_gpio_id);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+                       si_pi->svd_gpio_id);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+                                          2);
+       } else {
+               if (eg_pi->vddc_voltage_table.count) {
+                       si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
+                       table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+                               cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+                       for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+                               if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+                                       table->maxVDDCIndexInPPTable = i;
+                                       break;
+                               }
+                       }
+               }
+
+               if (eg_pi->vddci_voltage_table.count) {
+                       si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
+
+                       table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+                               cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+               }
+
+               if (si_pi->mvdd_voltage_table.count) {
+                       si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
+
+                       table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+                               cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+               }
+
+               if (si_pi->vddc_phase_shed_control) {
+                       if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
+                                                             &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+                               si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
+
+                               table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+                                       cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+
+                               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+                                                          (u32)si_pi->vddc_phase_shed_table.phase_delay);
+                       } else {
+                               si_pi->vddc_phase_shed_control = false;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+                                    const struct atom_voltage_table *table,
+                                    u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       unsigned int i;
+
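+       /* Use the first (lowest) entry that can supply the requested voltage. */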
+       for (i = 0; i < table->count; i++) {
+               if (value <= table->entries[i].value) {
+                       voltage->index = (u8)i;
+                       voltage->value = cpu_to_be16(table->entries[i].value);
+                       break;
+               }
+       }
+
+       if (i >= table->count)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+                                 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
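+       /* Below the MVDD split frequency use the lowest table entry, above it
+        * the highest.
+        */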
+       if (pi->mvdd_control) {
+               if (mclk <= pi->mvdd_split_frequency)
+                       voltage->index = 0;
+               else
+                       voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
+
+               voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
+       }
+       return 0;
+}
+
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+                                   SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+                                   u16 *std_voltage)
+{
+       u16 v_index;
+       bool voltage_found = false;
+
+       *std_voltage = be16_to_cpu(voltage->value);
+
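+       /* When CAC leakage data is available, replace the raw vddc with the
+        * leakage-corrected "standard" voltage: look for an exact match in
+        * the vddc-vs-sclk table first, then fall back to the first entry at
+        * least as high; without the new-CAC cap, index the leakage table
+        * directly.
+        */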
+       if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+               if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
+                       if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+                               return -EINVAL;
+
+                       for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+                               if (be16_to_cpu(voltage->value) ==
+                                   (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+                                       voltage_found = true;
+                                       if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+                                               *std_voltage =
+                                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+                                       else
+                                               *std_voltage =
+                                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+                                       break;
+                               }
+                       }
+
+                       if (!voltage_found) {
+                               for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+                                       if (be16_to_cpu(voltage->value) <=
+                                           (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+                                               voltage_found = true;
+                                               if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+                                                       *std_voltage =
+                                                               adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+                                               else
+                                                       *std_voltage =
+                                                               adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+                                               break;
+                                       }
+                               }
+                       }
+               } else {
+                       if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+                               *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+               }
+       }
+
+       return 0;
+}
+
+static int si_populate_std_voltage_value(struct amdgpu_device *adev,
+                                        u16 value, u8 index,
+                                        SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       voltage->index = index;
+       voltage->value = cpu_to_be16(value);
+
+       return 0;
+}
+
+static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
+                                           const struct amdgpu_phase_shedding_limits_table *limits,
+                                           u16 voltage, u32 sclk, u32 mclk,
+                                           SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
+{
+       unsigned int i;
+
+       for (i = 0; i < limits->count; i++) {
+               if ((voltage <= limits->entries[i].voltage) &&
+                   (sclk <= limits->entries[i].sclk) &&
+                   (mclk <= limits->entries[i].mclk))
+                       break;
+       }
+
+       smc_voltage->phase_settings = (u8)i;
+
+       return 0;
+}
+
+static int si_init_arb_table_index(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       int ret;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
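+       /* The active MC arb set index lives in the top byte of the first
+        * dword of the arb table.
+        */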
+       tmp &= 0x00FFFFFF;
+       tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+       return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
+                                             tmp, si_pi->sram_end);
+}
+
+static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+       return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int si_reset_to_default(struct amdgpu_device *adev)
+{
+       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+               0 : -EINVAL;
+}
+
+static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       int ret;
+
+       ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+                                           &tmp, si_pi->sram_end);
+       if (ret)
+               return ret;
+
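+       /* The top byte holds the currently selected arb set; switch back to
+        * F0 only if some other set is active.
+        */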
+       tmp = (tmp >> 24) & 0xff;
+
+       if (tmp == MC_CG_ARB_FREQ_F0)
+               return 0;
+
+       return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
+                                           u32 engine_clock)
+{
+       u32 dram_rows;
+       u32 dram_refresh_rate;
+       u32 mc_arb_rfsh_rate;
+       u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+
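+       /* NOOFROWS encodes rows per bank: 0..3 -> 1K..8K, 4 and up -> 16K. */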
+       if (tmp >= 4)
+               dram_rows = 16384;
+       else
+               dram_rows = 1 << (tmp + 10);
+
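+       /* Derive the arb refresh rate (in units of 64 sclk cycles) from the
+        * DRAM refresh setting and the row count.
+        */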
+       dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
+       mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+       return mc_arb_rfsh_rate;
+}
+
+static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
+                                               struct rv7xx_pl *pl,
+                                               SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+       u32 dram_timing;
+       u32 dram_timing2;
+       u32 burst_time;
+
+       arb_regs->mc_arb_rfsh_rate =
+               (u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
+
+       amdgpu_atombios_set_engine_dram_timings(adev,
+                                           pl->sclk,
+                                           pl->mclk);
+
+       dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
+       dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+       burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+       arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
+       arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+       arb_regs->mc_arb_burst_time = (u8)burst_time;
+
+       return 0;
+}
+
+static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
+                                                 struct amdgpu_ps *amdgpu_state,
+                                                 unsigned int first_arb_set)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+       int i, ret = 0;
+
+       for (i = 0; i < state->performance_level_count; i++) {
+               ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
+               if (ret)
+                       break;
+               ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                                 si_pi->arb_table_start +
+                                                 offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+                                                 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
+                                                 (u8 *)&arb_regs,
+                                                 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+                                                 si_pi->sram_end);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
+                                              struct amdgpu_ps *amdgpu_new_state)
+{
+       return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
+                                                     SISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
+                                         struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+
+       if (pi->mvdd_control)
+               return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
+                                                si_pi->mvdd_bootup_value, voltage);
+
+       return 0;
+}
+
+static int si_populate_smc_initial_state(struct amdgpu_device *adev,
+                                        struct amdgpu_ps *amdgpu_initial_state,
+                                        SISLANDS_SMC_STATETABLE *table)
+{
+       struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 reg;
+       int ret;
+
+       table->initialState.levels[0].mclk.vDLL_CNTL =
+               cpu_to_be32(si_pi->clock_registers.dll_cntl);
+       table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
+       table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
+       table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
+       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
+       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+               cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
+       table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+               cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
+       table->initialState.levels[0].mclk.vMPLL_SS =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+       table->initialState.levels[0].mclk.vMPLL_SS2 =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+       table->initialState.levels[0].mclk.mclk_value =
+               cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
+       table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
+       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
+       table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+               cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
+
+       table->initialState.levels[0].sclk.sclk_value =
+               cpu_to_be32(initial_state->performance_levels[0].sclk);
+
+       table->initialState.levels[0].arbRefreshState =
+               SISLANDS_INITIAL_STATE_ARB_INDEX;
+
+       table->initialState.levels[0].ACIndex = 0;
+
+       ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                       initial_state->performance_levels[0].vddc,
+                                       &table->initialState.levels[0].vddc);
+
+       if (!ret) {
+               u16 std_vddc;
+
+               ret = si_get_std_voltage_value(adev,
+                                              &table->initialState.levels[0].vddc,
+                                              &std_vddc);
+               if (!ret)
+                       si_populate_std_voltage_value(adev, std_vddc,
+                                                     table->initialState.levels[0].vddc.index,
+                                                     &table->initialState.levels[0].std_vddc);
+       }
+
+       if (eg_pi->vddci_control)
+               si_populate_voltage_value(adev,
+                                         &eg_pi->vddci_voltage_table,
+                                         initial_state->performance_levels[0].vddci,
+                                         &table->initialState.levels[0].vddci);
+
+       if (si_pi->vddc_phase_shed_control)
+               si_populate_phase_shedding_value(adev,
+                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                initial_state->performance_levels[0].vddc,
+                                                initial_state->performance_levels[0].sclk,
+                                                initial_state->performance_levels[0].mclk,
+                                                &table->initialState.levels[0].vddc);
+
+       si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+
+       reg = CG_R(0xffff) | CG_L(0);
+       table->initialState.levels[0].aT = cpu_to_be32(reg);
+       table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+       table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+               table->initialState.levels[0].strobeMode =
+                       si_get_strobe_mode_settings(adev,
+                                                   initial_state->performance_levels[0].mclk);
+
+               if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+                       table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+               else
+                       table->initialState.levels[0].mcFlags = 0;
+       }
+
+       table->initialState.levelCount = 1;
+
+       table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+       table->initialState.levels[0].dpm2.MaxPS = 0;
+       table->initialState.levels[0].dpm2.NearTDPDec = 0;
+       table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+       table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+       table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+       reg = MIN_POWER_MASK | MAX_POWER_MASK;
+       table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+       reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+       table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+       return 0;
+}
+
+static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
+                                     SISLANDS_SMC_STATETABLE *table)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+       u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+       u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+       u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+       u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+       u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+       u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+       u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+       u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+       u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+       u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+       u32 reg;
+       int ret;
+
+       table->ACPIState = table->initialState;
+
+       table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+       if (pi->acpi_vddc) {
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+               if (!ret) {
+                       u16 std_vddc;
+
+                       ret = si_get_std_voltage_value(adev,
+                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+                       if (!ret)
+                               si_populate_std_voltage_value(adev, std_vddc,
+                                                             table->ACPIState.levels[0].vddc.index,
+                                                             &table->ACPIState.levels[0].std_vddc);
+               }
+               table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+
+               if (si_pi->vddc_phase_shed_control) {
+                       si_populate_phase_shedding_value(adev,
+                                                        &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                        pi->acpi_vddc,
+                                                        0,
+                                                        0,
+                                                        &table->ACPIState.levels[0].vddc);
+               }
+       } else {
+               ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+                                               pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+               if (!ret) {
+                       u16 std_vddc;
+
+                       ret = si_get_std_voltage_value(adev,
+                                                      &table->ACPIState.levels[0].vddc, &std_vddc);
+
+                       if (!ret)
+                               si_populate_std_voltage_value(adev, std_vddc,
+                                                             table->ACPIState.levels[0].vddc.index,
+                                                             &table->ACPIState.levels[0].std_vddc);
+               }
+               table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
+                                                                                   si_pi->sys_pcie_mask,
+                                                                                   si_pi->boot_pcie_gen,
+                                                                                   AMDGPU_PCIE_GEN1);
+
+               if (si_pi->vddc_phase_shed_control)
+                       si_populate_phase_shedding_value(adev,
+                                                        &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                        pi->min_vddc_in_table,
+                                                        0,
+                                                        0,
+                                                        &table->ACPIState.levels[0].vddc);
+       }
+
+       if (pi->acpi_vddc) {
+               if (eg_pi->acpi_vddci)
+                       si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+                                                 eg_pi->acpi_vddci,
+                                                 &table->ACPIState.levels[0].vddci);
+       }
+
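+       /* For ACPI: hold the memory DLLs in reset and powered down, and run
+        * sclk from mux input 4.
+        */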
+       mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+       mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+       dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+       spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+       spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+       table->ACPIState.levels[0].mclk.vDLL_CNTL =
+               cpu_to_be32(dll_cntl);
+       table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+               cpu_to_be32(mclk_pwrmgt_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+               cpu_to_be32(mpll_ad_func_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+               cpu_to_be32(mpll_dq_func_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+               cpu_to_be32(mpll_func_cntl);
+       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+               cpu_to_be32(mpll_func_cntl_1);
+       table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+               cpu_to_be32(mpll_func_cntl_2);
+       table->ACPIState.levels[0].mclk.vMPLL_SS =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+       table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+               cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+               cpu_to_be32(spll_func_cntl);
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+               cpu_to_be32(spll_func_cntl_2);
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+               cpu_to_be32(spll_func_cntl_3);
+       table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+               cpu_to_be32(spll_func_cntl_4);
+
+       table->ACPIState.levels[0].mclk.mclk_value = 0;
+       table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+       si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+
+       if (eg_pi->dynamic_ac_timing)
+               table->ACPIState.levels[0].ACIndex = 0;
+
+       table->ACPIState.levels[0].dpm2.MaxPS = 0;
+       table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+       table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+       table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+       table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+       reg = MIN_POWER_MASK | MAX_POWER_MASK;
+       table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+       reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+       table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+       return 0;
+}
+
+static int si_populate_ulv_state(struct amdgpu_device *adev,
+                                SISLANDS_SMC_SWSTATE *state)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       u32 sclk_in_sr = 1350; /* ??? */
+       int ret;
+
+       ret = si_convert_power_level_to_smc(adev, &ulv->pl,
+                                           &state->levels[0]);
+       if (!ret) {
+               if (eg_pi->sclk_deep_sleep) {
+                       if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+                       else
+                               state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+               }
+               if (ulv->one_pcie_lane_in_ulv)
+                       state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
+               state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+               state->levels[0].ACIndex = 1;
+               state->levels[0].std_vddc = state->levels[0].vddc;
+               state->levelCount = 1;
+
+               state->flags |= PPSMC_SWSTATE_FLAG_DC;
+       }
+
+       return ret;
+}
+
+static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+       int ret;
+
+       ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
+                                                  &arb_regs);
+       if (ret)
+               return ret;
+
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
+                                  ulv->volt_change_delay);
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                         si_pi->arb_table_start +
+                                         offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+                                         sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
+                                         (u8 *)&arb_regs,
+                                         sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+                                         si_pi->sram_end);
+
+       return ret;
+}
+
+static void si_get_mvdd_configuration(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+       pi->mvdd_split_frequency = 30000;
+}
+
+static int si_init_smc_table(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+       const struct si_ulv_param *ulv = &si_pi->ulv;
+       SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
+       int ret;
+       u32 lane_width;
+       u32 vr_hot_gpio;
+
+       si_populate_smc_voltage_tables(adev, table);
+
+       switch (adev->pm.int_thermal_type) {
+       case THERMAL_TYPE_SI:
+       case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+               table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+               break;
+       case THERMAL_TYPE_NONE:
+               table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+               break;
+       default:
+               table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+               break;
+       }
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+               table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
+               if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
+                       table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+       }
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+               table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+               table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
+               table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+
+       if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
+               table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
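+               /* The VR hot GPIO is carried in the backbias_response_time field. */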
+               vr_hot_gpio = adev->pm.dpm.backbias_response_time;
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
+                                          vr_hot_gpio);
+       }
+
+       ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
+       if (ret)
+               return ret;
+
+       ret = si_populate_smc_acpi_state(adev, table);
+       if (ret)
+               return ret;
+
+       table->driverState = table->initialState;
+
+       ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
+                                                    SISLANDS_INITIAL_STATE_ARB_INDEX);
+       if (ret)
+               return ret;
+
+       if (ulv->supported && ulv->pl.vddc) {
+               ret = si_populate_ulv_state(adev, &table->ULVState);
+               if (ret)
+                       return ret;
+
+               ret = si_program_ulv_memory_timing_parameters(adev);
+               if (ret)
+                       return ret;
+
+               WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
+               WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+
+               lane_width = amdgpu_get_pcie_lanes(adev);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+       } else {
+               table->ULVState = table->initialState;
+       }
+
+       return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
+                                          (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
+                                          si_pi->sram_end);
+}
+
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+                                   u32 engine_clock,
+                                   SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct atom_clock_dividers dividers;
+       u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+       u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+       u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+       u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+       u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
+       u64 tmp;
+       u32 reference_clock = adev->clock.spll.reference_freq;
+       u32 reference_divider;
+       u32 fbdiv;
+       int ret;
+
+       ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+                                            engine_clock, false, &dividers);
+       if (ret)
+               return ret;
+
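+       /*
+        * Feedback divider for the engine PLL:
+        *   fbdiv = sclk * ref_div * post_div * 2^14 / ref_clk,
+        * i.e. a fixed-point value whose 16384 scale presumably matches
+        * the fractional width of the SPLL_FB_DIV field.
+        */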
+       reference_divider = 1 + dividers.ref_div;
+
+       tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+       do_div(tmp, reference_clock);
+       fbdiv = (u32) tmp;
+
+       spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+       spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+       spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+       spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+       spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+       spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+       spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+       spll_func_cntl_3 |= SPLL_DITHEN;
+
+       if (pi->sclk_ss) {
+               struct amdgpu_atom_ss ss;
+               u32 vco_freq = engine_clock * dividers.post_div;
+
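+               /*
+                * Derive CLK_S (spread step) from the SS modulation rate
+                * and CLK_V (spread depth) from the SS percentage; the
+                * exact constants look inherited from the radeon SI code.
+                */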
+               if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                    ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+                       u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+                       u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+                       cg_spll_spread_spectrum &= ~CLK_S_MASK;
+                       cg_spll_spread_spectrum |= CLK_S(clk_s);
+                       cg_spll_spread_spectrum |= SSEN;
+
+                       cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+                       cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+               }
+       }
+
+       sclk->sclk_value = engine_clock;
+       sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+       sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+       sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+       sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+       sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+       sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+       return 0;
+}
+
+static int si_populate_sclk_value(struct amdgpu_device *adev,
+                                 u32 engine_clock,
+                                 SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+       SISLANDS_SMC_SCLK_VALUE sclk_tmp;
+       int ret;
+
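+       /* The SMC consumes these values big-endian, hence the byte swaps. */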
+       ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
+       if (!ret) {
+               sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+               sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+               sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+               sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+               sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+               sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+               sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+       }
+
+       return ret;
+}
+
+static int si_populate_mclk_value(struct amdgpu_device *adev,
+                                 u32 engine_clock,
+                                 u32 memory_clock,
+                                 SISLANDS_SMC_MCLK_VALUE *mclk,
+                                 bool strobe_mode,
+                                 bool dll_state_on)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+       u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+       u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+       u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+       u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+       u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+       u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+       u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
+       u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
+       struct atom_mpll_param mpll_param;
+       int ret;
+
+       ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+       if (ret)
+               return ret;
+
+       mpll_func_cntl &= ~BWCTRL_MASK;
+       mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+       mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+       mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+               CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+       mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+       mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+               mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+               mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+                       YCLK_POST_DIV(mpll_param.post_div);
+       }
+
+       if (pi->mclk_ss) {
+               struct amdgpu_atom_ss ss;
+               u32 freq_nom;
+               u32 tmp;
+               u32 reference_clock = adev->clock.mpll.reference_freq;
+
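+               /*
+                * Nominal frequency for the SS lookup: GDDR5 is
+                * effectively quad-pumped, other DRAM types double-pumped.
+                */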
+               if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+                       freq_nom = memory_clock * 4;
+               else
+                       freq_nom = memory_clock * 2;
+
+               tmp = freq_nom / reference_clock;
+               tmp = tmp * tmp;
+               if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+                                                    ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+                       u32 clks = reference_clock * 5 / ss.rate;
+                       u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+                       mpll_ss1 &= ~CLKV_MASK;
+                       mpll_ss1 |= CLKV(clkv);
+
+                       mpll_ss2 &= ~CLKS_MASK;
+                       mpll_ss2 |= CLKS(clks);
+               }
+       }
+
+       mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+       mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+       if (dll_state_on)
+               mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+       else
+               mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+       mclk->mclk_value = cpu_to_be32(memory_clock);
+       mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+       mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
+       mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
+       mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+       mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+       mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+       mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+       mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+       mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+       return 0;
+}
+
+static void si_populate_smc_sp(struct amdgpu_device *adev,
+                              struct amdgpu_ps *amdgpu_state,
+                              SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct si_ps *ps = si_get_ps(amdgpu_state);
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       int i;
+
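+       /*
+        * bSP: every level except the highest uses pi->dsp; the top level
+        * uses pi->psp (presumably the default vs. performance-state
+        * sampling periods).
+        */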
+       for (i = 0; i < ps->performance_level_count - 1; i++)
+               smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+       smc_state->levels[ps->performance_level_count - 1].bSP =
+               cpu_to_be32(pi->psp);
+}
+
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+                                        struct rv7xx_pl *pl,
+                                        SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       int ret;
+       bool dll_state_on;
+       u16 std_vddc;
+       bool gmc_pg = false;
+
+       if (eg_pi->pcie_performance_request &&
+           (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+               level->gen2PCIE = (u8)si_pi->force_pcie_gen;
+       else
+               level->gen2PCIE = (u8)pl->pcie_gen;
+
+       ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
+       if (ret)
+               return ret;
+
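+       /*
+        * Memory self-refresh (stutter) is only allowed below the mclk
+        * threshold, with UVD idle, stuttering enabled in the display
+        * controller and at most two active crtcs.  Note gmc_pg is
+        * hardwired false here, so MC power gating is never requested.
+        */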
+       level->mcFlags = 0;
+
+       if (pi->mclk_stutter_mode_threshold &&
+           (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+           !eg_pi->uvd_enabled &&
+           (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+           (adev->pm.dpm.new_active_crtc_count <= 2)) {
+               level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
+               if (gmc_pg)
+                       level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
+       }
+
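+       /*
+        * For GDDR5, turn on EDC read/write above their respective mclk
+        * thresholds and decide from the MC_SEQ_MISC straps whether the
+        * memory DLL may stay powered in strobe mode (register-level
+        * behaviour carried over from the radeon SI code).
+        */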
+       if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+               if (pl->mclk > pi->mclk_edc_enable_threshold)
+                       level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
+
+               if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+                       level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
+
+               level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
+
+               if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
+                       if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
+                           ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+                               dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+                       else
+                               dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+               } else {
+                       dll_state_on = false;
+               }
+       } else {
+               level->strobeMode = si_get_strobe_mode_settings(adev,
+                                                               pl->mclk);
+
+               dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+       }
+
+       ret = si_populate_mclk_value(adev,
+                                    pl->sclk,
+                                    pl->mclk,
+                                    &level->mclk,
+                                    (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
+       if (ret)
+               return ret;
+
+       ret = si_populate_voltage_value(adev,
+                                       &eg_pi->vddc_voltage_table,
+                                       pl->vddc, &level->vddc);
+       if (ret)
+               return ret;
+
+       ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
+       if (ret)
+               return ret;
+
+       ret = si_populate_std_voltage_value(adev, std_vddc,
+                                           level->vddc.index, &level->std_vddc);
+       if (ret)
+               return ret;
+
+       if (eg_pi->vddci_control) {
+               ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+                                               pl->vddci, &level->vddci);
+               if (ret)
+                       return ret;
+       }
+
+       if (si_pi->vddc_phase_shed_control) {
+               ret = si_populate_phase_shedding_value(adev,
+                                                      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+                                                      pl->vddc,
+                                                      pl->sclk,
+                                                      pl->mclk,
+                                                      &level->vddc);
+               if (ret)
+                       return ret;
+       }
+
+       level->MaxPoweredUpCU = si_pi->max_cu;
+
+       ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
+
+       return ret;
+}
+
+static int si_populate_smc_t(struct amdgpu_device *adev,
+                            struct amdgpu_ps *amdgpu_state,
+                            SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       u32 a_t;
+       u32 t_l, t_h;
+       u32 high_bsp;
+       int i, ret;
+
+       if (state->performance_level_count >= 9)
+               return -EINVAL;
+
+       if (state->performance_level_count < 2) {
+               a_t = CG_R(0xffff) | CG_L(0);
+               smc_state->levels[0].aT = cpu_to_be32(a_t);
+               return 0;
+       }
+
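+       /*
+        * Fill in the per-level activity thresholds (aT); CG_R/CG_L
+        * apparently encode the residency needed to drop to or rise from
+        * a level, scaled by the bsp/pbsp sampling periods.
+        */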
+       smc_state->levels[0].aT = cpu_to_be32(0);
+
+       for (i = 0; i <= state->performance_level_count - 2; i++) {
+               ret = r600_calculate_at(
+                       (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
+                       100 * R600_AH_DFLT,
+                       state->performance_levels[i + 1].sclk,
+                       state->performance_levels[i].sclk,
+                       &t_l,
+                       &t_h);
+
+               if (ret) {
+                       t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+                       t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+               }
+
+               a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+               a_t |= CG_R(t_l * pi->bsp / 20000);
+               smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+               high_bsp = (i == state->performance_level_count - 2) ?
+                       pi->pbsp : pi->bsp;
+               a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+               smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+       }
+
+       return 0;
+}
+
+static int si_disable_ulv(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+
+       if (ulv->supported)
+               return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+                       0 : -EINVAL;
+
+       return 0;
+}
+
+static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
+                                      struct amdgpu_ps *amdgpu_state)
+{
+       const struct si_power_info *si_pi = si_get_pi(adev);
+       const struct si_ulv_param *ulv = &si_pi->ulv;
+       const struct si_ps *state = si_get_ps(amdgpu_state);
+       int i;
+
+       if (state->performance_levels[0].mclk != ulv->pl.mclk)
+               return false;
+
+       /* XXX validate against display requirements! */
+
+       for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+               if (adev->clock.current_dispclk <=
+                   adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+                       if (ulv->pl.vddc <
+                           adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+                               return false;
+               }
+       }
+
+       if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
+               return false;
+
+       return true;
+}
+
+static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
+                                                      struct amdgpu_ps *amdgpu_new_state)
+{
+       const struct si_power_info *si_pi = si_get_pi(adev);
+       const struct si_ulv_param *ulv = &si_pi->ulv;
+
+       if (ulv->supported) {
+               if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
+                       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+                               0 : -EINVAL;
+       }
+       return 0;
+}
+
+static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
+                                        struct amdgpu_ps *amdgpu_state,
+                                        SISLANDS_SMC_SWSTATE *smc_state)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct ni_power_info *ni_pi = ni_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       int i, ret;
+       u32 threshold;
+       u32 sclk_in_sr = 1350; /* ??? */
+
+       if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
+               return -EINVAL;
+
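+       /*
+        * Display watermark threshold: the sclk of the highest level
+        * (the "* 100 / 100" is a no-op, presumably a leftover scale
+        * factor).
+        */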
+       threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
+
+       if (amdgpu_state->vclk && amdgpu_state->dclk) {
+               eg_pi->uvd_enabled = true;
+               if (eg_pi->smu_uvd_hs)
+                       smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
+       } else {
+               eg_pi->uvd_enabled = false;
+       }
+
+       if (state->dc_compatible)
+               smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
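+       /*
+        * Convert each performance level, tagging it with its arb
+        * (memory timing) slot, a display watermark and an AC-timing
+        * index into the MC register table.
+        */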
+       smc_state->levelCount = 0;
+       for (i = 0; i < state->performance_level_count; i++) {
+               if (eg_pi->sclk_deep_sleep) {
+                       if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
+                               if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+                                       smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+                               else
+                                       smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+                       }
+               }
+
+               ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
+                                                   &smc_state->levels[i]);
+               smc_state->levels[i].arbRefreshState =
+                       (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+               if (ret)
+                       return ret;
+
+               if (ni_pi->enable_power_containment)
+                       smc_state->levels[i].displayWatermark =
+                               (state->performance_levels[i].sclk < threshold) ?
+                               PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+               else
+                       smc_state->levels[i].displayWatermark = (i < 2) ?
+                               PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+               if (eg_pi->dynamic_ac_timing)
+                       smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+               else
+                       smc_state->levels[i].ACIndex = 0;
+
+               smc_state->levelCount++;
+       }
+
+       si_write_smc_soft_register(adev,
+                                  SI_SMC_SOFT_REGISTER_watermark_threshold,
+                                  threshold / 512);
+
+       si_populate_smc_sp(adev, amdgpu_state, smc_state);
+
+       ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
+       if (ret)
+               ni_pi->enable_power_containment = false;
+
+       ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
+       if (ret)
+               ni_pi->enable_sq_ramping = false;
+
+       return si_populate_smc_t(adev, amdgpu_state, smc_state);
+}
+
+static int si_upload_sw_state(struct amdgpu_device *adev,
+                             struct amdgpu_ps *amdgpu_new_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+       int ret;
+       u32 address = si_pi->state_table_start +
+               offsetof(SISLANDS_SMC_STATETABLE, driverState);
+       u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+               ((new_state->performance_level_count - 1) *
+                sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+       SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
+
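+       /*
+        * SISLANDS_SMC_SWSTATE embeds a single performance level, so
+        * state_size adds (count - 1) extra
+        * SISLANDS_SMC_HW_PERFORMANCE_LEVEL entries on top of it.
+        */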
+       memset(smc_state, 0, state_size);
+
+       ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
+       if (ret)
+               return ret;
+
+       return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+                                          state_size, si_pi->sram_end);
+}
+
+static int si_upload_ulv_state(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       int ret = 0;
+
+       if (ulv->supported && ulv->pl.vddc) {
+               u32 address = si_pi->state_table_start +
+                       offsetof(SISLANDS_SMC_STATETABLE, ULVState);
+               SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
+               u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+
+               memset(smc_state, 0, state_size);
+
+               ret = si_populate_ulv_state(adev, smc_state);
+               if (!ret)
+                       ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+                                                         state_size, si_pi->sram_end);
+       }
+
+       return ret;
+}
+
+static int si_upload_smc_data(struct amdgpu_device *adev)
+{
+       struct amdgpu_crtc *amdgpu_crtc = NULL;
+       int i;
+
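+       /*
+        * Hand the SMC the first active crtc's line time and watermarks,
+        * presumably so it can time mclk switches against the display;
+        * failures are deliberately ignored (every path returns 0).
+        */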
+       if (adev->pm.dpm.new_active_crtc_count == 0)
+               return 0;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+                       amdgpu_crtc = adev->mode_info.crtcs[i];
+                       break;
+               }
+       }
+
+       if (amdgpu_crtc == NULL)
+               return 0;
+
+       if (amdgpu_crtc->line_time <= 0)
+               return 0;
+
+       if (si_write_smc_soft_register(adev,
+                                      SI_SMC_SOFT_REGISTER_crtc_index,
+                                      amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
+               return 0;
+
+       if (si_write_smc_soft_register(adev,
+                                      SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+                                      amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+               return 0;
+
+       if (si_write_smc_soft_register(adev,
+                                      SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+                                      amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+               return 0;
+
+       return 0;
+}
+
+static int si_set_mc_special_registers(struct amdgpu_device *adev,
+                                      struct si_mc_reg_table *table)
+{
+       u8 i, j, k;
+       u32 temp_reg;
+
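+       /*
+        * Derive extra per-state entries for the memory command
+        * (EMRS/MRS/MRS1) registers from the current register contents
+        * plus the per-entry sequencer data, appending them after the
+        * copied vbios entries.
+        */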
+       for (i = 0, j = table->last; i < table->last; i++) {
+               if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                       return -EINVAL;
+               switch (table->mc_reg_address[i].s1) {
+               case MC_SEQ_MISC1:
+                       temp_reg = RREG32(MC_PMG_CMD_EMRS);
+                       table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
+                       table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
+                       for (k = 0; k < table->num_entries; k++)
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       ((temp_reg & 0xffff0000)) |
+                                       ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+                       j++;
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               return -EINVAL;
+
+                       temp_reg = RREG32(MC_PMG_CMD_MRS);
+                       table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
+                       table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
+                       for (k = 0; k < table->num_entries; k++) {
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                               if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+                                       table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+                       }
+                       j++;
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               return -EINVAL;
+
+                       if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+                               table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
+                               table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
+                               for (k = 0; k < table->num_entries; k++)
+                                       table->mc_reg_table_entry[k].mc_data[j] =
+                                               (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+                               j++;
+                               if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                                       return -EINVAL;
+                       }
+                       break;
+               case MC_SEQ_RESERVE_M:
+                       temp_reg = RREG32(MC_PMG_CMD_MRS1);
+                       table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
+                       table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
+                       for (k = 0; k < table->num_entries; k++)
+                               table->mc_reg_table_entry[k].mc_data[j] =
+                                       (temp_reg & 0xffff0000) |
+                                       (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+                       j++;
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               return -EINVAL;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       table->last = j;
+
+       return 0;
+}
+
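+/*
+ * Map an MC sequencer register onto its _LP shadow copy; the shadows
+ * are apparently what the per-state values are programmed through.
+ */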
+static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+       bool result = true;
+       switch (in_reg) {
+       case  MC_SEQ_RAS_TIMING:
+               *out_reg = MC_SEQ_RAS_TIMING_LP;
+               break;
+       case MC_SEQ_CAS_TIMING:
+               *out_reg = MC_SEQ_CAS_TIMING_LP;
+               break;
+       case MC_SEQ_MISC_TIMING:
+               *out_reg = MC_SEQ_MISC_TIMING_LP;
+               break;
+       case MC_SEQ_MISC_TIMING2:
+               *out_reg = MC_SEQ_MISC_TIMING2_LP;
+               break;
+       case MC_SEQ_RD_CTL_D0:
+               *out_reg = MC_SEQ_RD_CTL_D0_LP;
+               break;
+       case MC_SEQ_RD_CTL_D1:
+               *out_reg = MC_SEQ_RD_CTL_D1_LP;
+               break;
+       case MC_SEQ_WR_CTL_D0:
+               *out_reg = MC_SEQ_WR_CTL_D0_LP;
+               break;
+       case MC_SEQ_WR_CTL_D1:
+               *out_reg = MC_SEQ_WR_CTL_D1_LP;
+               break;
+       case MC_PMG_CMD_EMRS:
+               *out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
+               break;
+       case MC_PMG_CMD_MRS:
+               *out_reg = MC_SEQ_PMG_CMD_MRS_LP;
+               break;
+       case MC_PMG_CMD_MRS1:
+               *out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
+               break;
+       case MC_SEQ_PMG_TIMING:
+               *out_reg = MC_SEQ_PMG_TIMING_LP;
+               break;
+       case MC_PMG_CMD_MRS2:
+               *out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
+               break;
+       case MC_SEQ_WR_CTL_2:
+               *out_reg = MC_SEQ_WR_CTL_2_LP;
+               break;
+       default:
+               result = false;
+               break;
+       }
+
+       return result;
+}
+
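+/*
+ * Flag the registers whose value actually differs between entries;
+ * columns that never change are dropped when the table is sent to the SMC.
+ */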
+static void si_set_valid_flag(struct si_mc_reg_table *table)
+{
+       u8 i, j;
+
+       for (i = 0; i < table->last; i++) {
+               for (j = 1; j < table->num_entries; j++) {
+                       if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+                               table->valid_flag |= 1 << i;
+                               break;
+                       }
+               }
+       }
+}
+
+static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
+{
+       u32 i;
+       u16 address;
+
+       for (i = 0; i < table->last; i++)
+               table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+                       address : table->mc_reg_address[i].s1;
+}
+
+static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+                                     struct si_mc_reg_table *si_table)
+{
+       u8 i, j;
+
+       if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+               return -EINVAL;
+       if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+               return -EINVAL;
+
+       for (i = 0; i < table->last; i++)
+               si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+       si_table->last = table->last;
+
+       for (i = 0; i < table->num_entries; i++) {
+               si_table->mc_reg_table_entry[i].mclk_max =
+                       table->mc_reg_table_entry[i].mclk_max;
+               for (j = 0; j < table->last; j++) {
+                       si_table->mc_reg_table_entry[i].mc_data[j] =
+                               table->mc_reg_table_entry[i].mc_data[j];
+               }
+       }
+       si_table->num_entries = table->num_entries;
+
+       return 0;
+}
+
+static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct atom_mc_reg_table *table;
+       struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
+       u8 module_index = rv770_get_memory_module_index(adev);
+       int ret;
+
+       table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
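+       /* Seed each _LP shadow register from its live counterpart. */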
+       WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+       WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+       WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+       WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+       WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+       WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+       WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+       WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+       WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+       WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+       WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+       WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+       WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+       WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+       ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+       if (ret)
+               goto init_mc_done;
+
+       ret = si_copy_vbios_mc_reg_table(table, si_table);
+       if (ret)
+               goto init_mc_done;
+
+       si_set_s0_mc_reg_index(si_table);
+
+       ret = si_set_mc_special_registers(adev, si_table);
+       if (ret)
+               goto init_mc_done;
+
+       si_set_valid_flag(si_table);
+
+init_mc_done:
+       kfree(table);
+
+       return ret;
+}
+
+static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
+                                        SMC_SIslands_MCRegisters *mc_reg_table)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 i, j;
+
+       for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+               if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+                       if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               break;
+                       mc_reg_table->address[i].s0 =
+                               cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+                       mc_reg_table->address[i].s1 =
+                               cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
+                       i++;
+               }
+       }
+       mc_reg_table->last = (u8)i;
+}
+
+static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
+                                   SMC_SIslands_MCRegisterSet *data,
+                                   u32 num_entries, u32 valid_flag)
+{
+       u32 i, j;
+
+       for (i = 0, j = 0; j < num_entries; j++) {
+               if (valid_flag & (1 << j)) {
+                       data->value[i] = cpu_to_be32(entry->mc_data[j]);
+                       i++;
+               }
+       }
+}
+
+static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+                                                struct rv7xx_pl *pl,
+                                                SMC_SIslands_MCRegisterSet *mc_reg_table_data)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 i = 0;
+
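+       /*
+        * Pick the first entry whose mclk_max covers this level's mclk,
+        * falling back to the last entry for out-of-range clocks.
+        */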
+       for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
+               if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+                       break;
+       }
+
+       if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
+               --i;
+
+       si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
+                               mc_reg_table_data, si_pi->mc_reg_table.last,
+                               si_pi->mc_reg_table.valid_flag);
+}
+
+static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+                                          struct amdgpu_ps *amdgpu_state,
+                                          SMC_SIslands_MCRegisters *mc_reg_table)
+{
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       int i;
+
+       for (i = 0; i < state->performance_level_count; i++) {
+               si_convert_mc_reg_table_entry_to_smc(adev,
+                                                    &state->performance_levels[i],
+                                                    &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+       }
+}
+
+static int si_populate_mc_reg_table(struct amdgpu_device *adev,
+                                   struct amdgpu_ps *amdgpu_boot_state)
+{
+       struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ulv_param *ulv = &si_pi->ulv;
+       SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+       memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
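+       /*
+        * Fixed slot layout: boot level -> INITIAL, entry 0 -> ACPI,
+        * the ULV level (or entry 0 again) -> ULV, then one driver-state
+        * slot per performance level.
+        */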
+       si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
+
+       si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
+
+       si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
+                                            &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
+
+       si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+                               &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
+                               si_pi->mc_reg_table.last,
+                               si_pi->mc_reg_table.valid_flag);
+
+       if (ulv->supported && ulv->pl.vddc != 0)
+               si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
+                                                    &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
+       else
+               si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+                                       &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
+                                       si_pi->mc_reg_table.last,
+                                       si_pi->mc_reg_table.valid_flag);
+
+       si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
+
+       return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
+                                          (u8 *)smc_mc_reg_table,
+                                          sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
+}
+
+static int si_upload_mc_reg_table(struct amdgpu_device *adev,
+                                 struct amdgpu_ps *amdgpu_new_state)
+{
+       struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 address = si_pi->mc_reg_table_start +
+               offsetof(SMC_SIslands_MCRegisters,
+                        data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+       SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+       memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+       si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
+
+       return amdgpu_si_copy_bytes_to_smc(adev, address,
+                                          (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+                                          sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
+                                          si_pi->sram_end);
+}
+
+static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
+{
+       if (enable)
+               WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+       else
+               WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+                                                     struct amdgpu_ps *amdgpu_state)
+{
+       struct si_ps *state = si_get_ps(amdgpu_state);
+       int i;
+       u16 pcie_speed, max_speed = 0;
+
+       for (i = 0; i < state->performance_level_count; i++) {
+               pcie_speed = state->performance_levels[i].pcie_gen;
+               if (max_speed < pcie_speed)
+                       max_speed = pcie_speed;
+       }
+       return max_speed;
+}
+
+static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+       u32 speed_cntl;
+
+       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+       speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+       return (u16)speed_cntl;
+}
+
+static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+                                                            struct amdgpu_ps *amdgpu_new_state,
+                                                            struct amdgpu_ps *amdgpu_current_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+       enum amdgpu_pcie_gen current_link_speed;
+
+       if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+               current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
+       else
+               current_link_speed = si_pi->force_pcie_gen;
+
+       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+       si_pi->pspp_notify_required = false;
+       if (target_link_speed > current_link_speed) {
+               switch (target_link_speed) {
+#if defined(CONFIG_ACPI)
+               case AMDGPU_PCIE_GEN3:
+                       if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+                               break;
+                       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+                       if (current_link_speed == AMDGPU_PCIE_GEN2)
+                               break;
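+                       /* fall through */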
+               case AMDGPU_PCIE_GEN2:
+                       if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+                               break;
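+                       /* fall through */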
+#endif
+               default:
+                       si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
+                       break;
+               }
+       } else {
+               if (target_link_speed < current_link_speed)
+                       si_pi->pspp_notify_required = true;
+       }
+}
+
+static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+                                                          struct amdgpu_ps *amdgpu_new_state,
+                                                          struct amdgpu_ps *amdgpu_current_state)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+       u8 request;
+
+       if (si_pi->pspp_notify_required) {
+               if (target_link_speed == AMDGPU_PCIE_GEN3)
+                       request = PCIE_PERF_REQ_PECI_GEN3;
+               else if (target_link_speed == AMDGPU_PCIE_GEN2)
+                       request = PCIE_PERF_REQ_PECI_GEN2;
+               else
+                       request = PCIE_PERF_REQ_PECI_GEN1;
+
+               if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+                   (si_get_current_pcie_speed(adev) > 0))
+                       return;
+
+#if defined(CONFIG_ACPI)
+               amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+       }
+}
+
+#if 0
+static int si_ds_request(struct amdgpu_device *adev,
+                        bool ds_status_on, u32 count_write)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+
+       if (eg_pi->sclk_deep_sleep) {
+               if (ds_status_on)
+                       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
+                               PPSMC_Result_OK) ?
+                               0 : -EINVAL;
+               else
+                       return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
+                               PPSMC_Result_OK) ? 0 : -EINVAL;
+       }
+       return 0;
+}
+#endif
+
+static void si_set_max_cu_value(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+
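+       /* Cap of powered-up CUs per Verde SKU, keyed by PCI device id. */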
+       if (adev->asic_type == CHIP_VERDE) {
+               switch (adev->pdev->device) {
+               case 0x6820:
+               case 0x6825:
+               case 0x6821:
+               case 0x6823:
+               case 0x6827:
+                       si_pi->max_cu = 10;
+                       break;
+               case 0x682D:
+               case 0x6824:
+               case 0x682F:
+               case 0x6826:
+                       si_pi->max_cu = 8;
+                       break;
+               case 0x6828:
+               case 0x6830:
+               case 0x6831:
+               case 0x6838:
+               case 0x6839:
+               case 0x683D:
+                       si_pi->max_cu = 10;
+                       break;
+               case 0x683B:
+               case 0x683F:
+               case 0x6829:
+                       si_pi->max_cu = 8;
+                       break;
+               default:
+                       si_pi->max_cu = 0;
+                       break;
+               }
+       } else {
+               si_pi->max_cu = 0;
+       }
+}
+
+static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
+                                                            struct amdgpu_clock_voltage_dependency_table *table)
+{
+       u32 i;
+       int j;
+       u16 leakage_voltage;
+
+       if (table) {
+               for (i = 0; i < table->count; i++) {
+                       switch (si_get_leakage_voltage_from_leakage_index(adev,
+                                                                         table->entries[i].v,
+                                                                         &leakage_voltage)) {
+                       case 0:
+                               table->entries[i].v = leakage_voltage;
+                               break;
+                       case -EAGAIN:
+                               return -EINVAL;
+                       case -EINVAL:
+                       default:
+                               break;
+                       }
+               }
+
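+               /*
+                * Walk backwards and clamp each entry so the voltages
+                * stay monotonically non-decreasing with clock.
+                */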
+               for (j = (table->count - 2); j >= 0; j--) {
+                       table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
+                               table->entries[j].v : table->entries[j + 1].v;
+               }
+       }
+       return 0;
+}
+
+static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       ret = si_patch_single_dependency_table_based_on_leakage(adev,
+                                                               &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+       if (ret)
+               DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
+       ret = si_patch_single_dependency_table_based_on_leakage(adev,
+                                                               &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+       if (ret)
+               DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
+       ret = si_patch_single_dependency_table_based_on_leakage(adev,
+                                                               &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+       if (ret)
+               DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
+       return ret;
+}
+
+static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
+                                         struct amdgpu_ps *amdgpu_new_state,
+                                         struct amdgpu_ps *amdgpu_current_state)
+{
+       u32 lane_width;
+       u32 new_lane_width =
+               (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+       u32 current_lane_width =
+               (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+
+       if (new_lane_width != current_lane_width) {
+               amdgpu_set_pcie_lanes(adev, new_lane_width);
+               lane_width = amdgpu_get_pcie_lanes(adev);
+               si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+       }
+}
+
+static void si_dpm_setup_asic(struct amdgpu_device *adev)
+{
+       si_read_clock_registers(adev);
+       si_enable_acpi_power_management(adev);
+}
+
+static int si_thermal_enable_alert(struct amdgpu_device *adev,
+                                  bool enable)
+{
+       u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+       if (enable) {
+               PPSMC_Result result;
+
+               thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+               WREG32(CG_THERMAL_INT, thermal_int);
+               result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+               if (result != PPSMC_Result_OK) {
+                       DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+                       return -EINVAL;
+               }
+       } else {
+               thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+               WREG32(CG_THERMAL_INT, thermal_int);
+       }
+
+       return 0;
+}
+
+static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
+                                           int min_temp, int max_temp)
+{
+       int low_temp = 0 * 1000;
+       int high_temp = 255 * 1000;
+
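+       /* Temperatures are in millidegrees C; the register fields take degrees. */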
+       if (low_temp < min_temp)
+               low_temp = min_temp;
+       if (high_temp > max_temp)
+               high_temp = max_temp;
+       if (high_temp < low_temp) {
+               DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+               return -EINVAL;
+       }
+
+       WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+       WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+       WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+       adev->pm.dpm.thermal.min_temp = low_temp;
+       adev->pm.dpm.thermal.max_temp = high_temp;
+
+       return 0;
+}
+
+static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+
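+       /*
+        * Stash the hardware's default fan mode and TMIN on first use so
+        * si_fan_ctrl_set_default_mode() can restore them later.
+        */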
+       if (si_pi->fan_ctrl_is_in_default_mode) {
+               tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+               si_pi->fan_ctrl_default_mode = tmp;
+               tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+               si_pi->t_min = tmp;
+               si_pi->fan_ctrl_is_in_default_mode = false;
+       }
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+       tmp |= TMIN(0);
+       WREG32(CG_FDO_CTRL2, tmp);
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+       tmp |= FDO_PWM_MODE(mode);
+       WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+       u32 duty100;
+       u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+       u16 fdo_min, slope1, slope2;
+       u32 reference_clock, tmp;
+       int ret;
+       u64 tmp64;
+
+       if (!si_pi->fan_table_start) {
+               adev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0) {
+               adev->pm.dpm.fan.ucode_fan_control = false;
+               return 0;
+       }
+
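+       /* fan.pwm_min appears to be in 0.01% units, hence the / 10000. */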
+       tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+       do_div(tmp64, 10000);
+       fdo_min = (u16)tmp64;
+
+       t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+       t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+       pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+       pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
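+       /*
+        * PWM-per-degree slopes between the fan table's temperature
+        * points; the "+ 50 ... / 100" rounds to nearest, and the factor
+        * 16 presumably matches the SMC's fixed-point slope format.
+        */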
+       slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+       slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+       fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+       fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+       fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+       fan_table.slope1 = cpu_to_be16(slope1);
+       fan_table.slope2 = cpu_to_be16(slope2);
+       fan_table.fdo_min = cpu_to_be16(fdo_min);
+       fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+       fan_table.hys_up = cpu_to_be16(1);
+       fan_table.hys_slope = cpu_to_be16(1);
+       fan_table.temp_resp_lim = cpu_to_be16(5);
+       reference_clock = amdgpu_asic_get_xclk(adev);
+
+       fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+                                               reference_clock) / 1600);
+       fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+       tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+       fan_table.temp_src = (uint8_t)tmp;
+
+       ret = amdgpu_si_copy_bytes_to_smc(adev,
+                                         si_pi->fan_table_start,
+                                         (u8 *)(&fan_table),
+                                         sizeof(fan_table),
+                                         si_pi->sram_end);
+
+       if (ret) {
+               DRM_ERROR("Failed to load fan table to the SMC.");
+               adev->pm.dpm.fan.ucode_fan_control = false;
+       }
+
+       return ret;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PPSMC_Result ret;
+
+       ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
+       if (ret == PPSMC_Result_OK) {
+               si_pi->fan_is_controlled_by_smc = true;
+               return 0;
+       } else {
+               return -EINVAL;
+       }
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       PPSMC_Result ret;
+
+       ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
+
+       if (ret == PPSMC_Result_OK) {
+               si_pi->fan_is_controlled_by_smc = false;
+               return 0;
+       } else {
+               return -EINVAL;
+       }
+}
+
+static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+                                        u32 *speed)
+{
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+       duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)duty * 100;
+       do_div(tmp64, duty100);
+       *speed = (u32)tmp64;
+
+       if (*speed > 100)
+               *speed = 100;
+
+       return 0;
+}
+
+static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+                                        u32 speed)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+       u32 duty, duty100;
+       u64 tmp64;
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       if (si_pi->fan_is_controlled_by_smc)
+               return -EINVAL;
+
+       if (speed > 100)
+               return -EINVAL;
+
+       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+       if (duty100 == 0)
+               return -EINVAL;
+
+       tmp64 = (u64)speed * duty100;
+       do_div(tmp64, 100);
+       duty = (u32)tmp64;
+
+       tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+       tmp |= FDO_STATIC_DUTY(duty);
+       WREG32(CG_FDO_CTRL0, tmp);
+
+       return 0;
+}
+
+static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+       if (mode) {
+               /* stop auto-manage */
+               if (adev->pm.dpm.fan.ucode_fan_control)
+                       si_fan_ctrl_stop_smc_fan_control(adev);
+               si_fan_ctrl_set_static_mode(adev, mode);
+       } else {
+               /* restart auto-manage */
+               if (adev->pm.dpm.fan.ucode_fan_control)
+                       si_thermal_start_smc_fan_control(adev);
+               else
+                       si_fan_ctrl_set_default_mode(adev);
+       }
+}
+
+static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+
+       if (si_pi->fan_is_controlled_by_smc)
+               return 0;
+
+       tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+       return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+                                        u32 *speed)
+{
+       u32 tach_period;
+       u32 xclk = amdgpu_asic_get_xclk(adev);
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       if (adev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+       if (tach_period == 0)
+               return -ENOENT;
+
+       *speed = 60 * xclk * 10000 / tach_period;
+
+       return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+                                        u32 speed)
+{
+       u32 tach_period, tmp;
+       u32 xclk = amdgpu_asic_get_xclk(adev);
+
+       if (adev->pm.no_fan)
+               return -ENOENT;
+
+       if (adev->pm.fan_pulses_per_revolution == 0)
+               return -ENOENT;
+
+       if ((speed < adev->pm.fan_min_rpm) ||
+           (speed > adev->pm.fan_max_rpm))
+               return -EINVAL;
+
+       if (adev->pm.dpm.fan.ucode_fan_control)
+               si_fan_ctrl_stop_smc_fan_control(adev);
+
+       tach_period = 60 * xclk * 10000 / (8 * speed);
+       tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+       tmp |= TARGET_PERIOD(tach_period);
+       WREG32(CG_TACH_CTRL, tmp);
+
+       si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+       return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+       struct si_power_info *si_pi = si_get_pi(adev);
+       u32 tmp;
+
+       if (!si_pi->fan_ctrl_is_in_default_mode) {
+               tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+               tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+               WREG32(CG_FDO_CTRL2, tmp);
+
+               tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+               tmp |= TMIN(si_pi->t_min);
+               WREG32(CG_FDO_CTRL2, tmp);
+               si_pi->fan_ctrl_is_in_default_mode = true;
+       }
+}
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+       if (adev->pm.dpm.fan.ucode_fan_control) {
+               si_fan_ctrl_start_smc_fan_control(adev);
+               si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+       }
+}
+
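+/* Program the tachometer edge count from the fan's pulses-per-revolution and
+ * set the PWM response rate; 0x28 is presumably the vendor-recommended
+ * default, as the same value appears in other AMD DPM code.
+ */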
+static void si_thermal_initialize(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
+       if (adev->pm.fan_pulses_per_revolution) {
+               tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+               tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
+               WREG32(CG_TACH_CTRL, tmp);
+       }
+
+       tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+       tmp |= TACH_PWM_RESP_RATE(0x28);
+       WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+       int ret;
+
+       si_thermal_initialize(adev);
+       ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = si_thermal_enable_alert(adev, true);
+       if (ret)
+               return ret;
+       if (adev->pm.dpm.fan.ucode_fan_control) {
+               ret = si_halt_smc(adev);
+               if (ret)
+                       return ret;
+               ret = si_thermal_setup_fan_table(adev);
+               if (ret)
+                       return ret;
+               ret = si_resume_smc(adev);
+               if (ret)
+                       return ret;
+               si_thermal_start_smc_fan_control(adev);
+       }
+
+       return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+       if (!adev->pm.no_fan) {
+               si_fan_ctrl_set_default_mode(adev);
+               si_fan_ctrl_stop_smc_fan_control(adev);
+       }
+}
+
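+/* Full DPM bring-up: voltage and MC tables are constructed first, the SMC
+ * firmware is uploaded and its tables (state, SPLL, arb, CAC, DTE, TDP)
+ * initialized, and only then is the SMC started and DPM plus the thermal
+ * controller enabled. Any failure aborts the sequence with a DRM_ERROR.
+ */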
+static int si_dpm_enable(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+       int ret;
+
+       if (amdgpu_si_is_smc_running(adev))
+               return -EINVAL;
+       if (pi->voltage_control || si_pi->voltage_control_svi2)
+               si_enable_voltage_control(adev, true);
+       if (pi->mvdd_control)
+               si_get_mvdd_configuration(adev);
+       if (pi->voltage_control || si_pi->voltage_control_svi2) {
+               ret = si_construct_voltage_tables(adev);
+               if (ret) {
+                       DRM_ERROR("si_construct_voltage_tables failed\n");
+                       return ret;
+               }
+       }
+       if (eg_pi->dynamic_ac_timing) {
+               ret = si_initialize_mc_reg_table(adev);
+               if (ret)
+                       eg_pi->dynamic_ac_timing = false;
+       }
+       if (pi->dynamic_ss)
+               si_enable_spread_spectrum(adev, true);
+       if (pi->thermal_protection)
+               si_enable_thermal_protection(adev, true);
+       si_setup_bsp(adev);
+       si_program_git(adev);
+       si_program_tp(adev);
+       si_program_tpp(adev);
+       si_program_sstp(adev);
+       si_enable_display_gap(adev);
+       si_program_vc(adev);
+       ret = si_upload_firmware(adev);
+       if (ret) {
+               DRM_ERROR("si_upload_firmware failed\n");
+               return ret;
+       }
+       ret = si_process_firmware_header(adev);
+       if (ret) {
+               DRM_ERROR("si_process_firmware_header failed\n");
+               return ret;
+       }
+       ret = si_initial_switch_from_arb_f0_to_f1(adev);
+       if (ret) {
+               DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
+               return ret;
+       }
+       ret = si_init_smc_table(adev);
+       if (ret) {
+               DRM_ERROR("si_init_smc_table failed\n");
+               return ret;
+       }
+       ret = si_init_smc_spll_table(adev);
+       if (ret) {
+               DRM_ERROR("si_init_smc_spll_table failed\n");
+               return ret;
+       }
+       ret = si_init_arb_table_index(adev);
+       if (ret) {
+               DRM_ERROR("si_init_arb_table_index failed\n");
+               return ret;
+       }
+       if (eg_pi->dynamic_ac_timing) {
+               ret = si_populate_mc_reg_table(adev, boot_ps);
+               if (ret) {
+                       DRM_ERROR("si_populate_mc_reg_table failed\n");
+                       return ret;
+               }
+       }
+       ret = si_initialize_smc_cac_tables(adev);
+       if (ret) {
+               DRM_ERROR("si_initialize_smc_cac_tables failed\n");
+               return ret;
+       }
+       ret = si_initialize_hardware_cac_manager(adev);
+       if (ret) {
+               DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
+               return ret;
+       }
+       ret = si_initialize_smc_dte_tables(adev);
+       if (ret) {
+               DRM_ERROR("si_initialize_smc_dte_tables failed\n");
+               return ret;
+       }
+       ret = si_populate_smc_tdp_limits(adev, boot_ps);
+       if (ret) {
+               DRM_ERROR("si_populate_smc_tdp_limits failed\n");
+               return ret;
+       }
+       ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
+       if (ret) {
+               DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
+               return ret;
+       }
+       si_program_response_times(adev);
+       si_program_ds_registers(adev);
+       si_dpm_start_smc(adev);
+       ret = si_notify_smc_display_change(adev, false);
+       if (ret) {
+               DRM_ERROR("si_notify_smc_display_change failed\n");
+               return ret;
+       }
+       si_enable_sclk_control(adev, true);
+       si_start_dpm(adev);
+
+       si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+       si_thermal_start_thermal_controller(adev);
+       ni_update_current_ps(adev, boot_ps);
+
+       return 0;
+}
+
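+/* Thermal alerts are disabled while the temperature range is reprogrammed so
+ * a stale threshold cannot fire mid-update, then re-enabled afterwards.
+ */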
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+       int ret;
+
+       ret = si_thermal_enable_alert(adev, false);
+       if (ret)
+               return ret;
+       ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+       if (ret)
+               return ret;
+       ret = si_thermal_enable_alert(adev, true);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+static void si_dpm_disable(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+       if (!amdgpu_si_is_smc_running(adev))
+               return;
+       si_thermal_stop_thermal_controller(adev);
+       si_disable_ulv(adev);
+       si_clear_vc(adev);
+       if (pi->thermal_protection)
+               si_enable_thermal_protection(adev, false);
+       si_enable_power_containment(adev, boot_ps, false);
+       si_enable_smc_cac(adev, boot_ps, false);
+       si_enable_spread_spectrum(adev, false);
+       si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+       si_stop_dpm(adev);
+       si_reset_to_default(adev);
+       si_dpm_stop_smc(adev);
+       si_force_switch_to_arb_f0(adev);
+
+       ni_update_current_ps(adev, boot_ps);
+}
+
+static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+       struct amdgpu_ps *new_ps = &requested_ps;
+
+       ni_update_requested_ps(adev, new_ps);
+       si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
+
+       return 0;
+}
+
+static int si_power_control_set_level(struct amdgpu_device *adev)
+{
+       struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
+       int ret;
+
+       ret = si_restrict_performance_levels_before_switch(adev);
+       if (ret)
+               return ret;
+       ret = si_halt_smc(adev);
+       if (ret)
+               return ret;
+       ret = si_populate_smc_tdp_limits(adev, new_ps);
+       if (ret)
+               return ret;
+       ret = si_populate_smc_tdp_limits_2(adev, new_ps);
+       if (ret)
+               return ret;
+       ret = si_resume_smc(adev);
+       if (ret)
+               return ret;
+       ret = si_set_sw_state(adev);
+       if (ret)
+               return ret;
+       return 0;
+}
+
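+/* State-switch protocol: power containment and CAC are dropped for the new
+ * state, the SMC is halted while the software state, ULV state and MC
+ * register tables are uploaded, then the SMC is resumed and the switch is
+ * committed via si_set_sw_state() before containment and CAC are re-enabled.
+ */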
+static int si_dpm_set_power_state(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+       struct amdgpu_ps *old_ps = &eg_pi->current_rps;
+       int ret;
+
+       ret = si_disable_ulv(adev);
+       if (ret) {
+               DRM_ERROR("si_disable_ulv failed\n");
+               return ret;
+       }
+       ret = si_restrict_performance_levels_before_switch(adev);
+       if (ret) {
+               DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
+               return ret;
+       }
+       if (eg_pi->pcie_performance_request)
+               si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+       ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
+       ret = si_enable_power_containment(adev, new_ps, false);
+       if (ret) {
+               DRM_ERROR("si_enable_power_containment failed\n");
+               return ret;
+       }
+       ret = si_enable_smc_cac(adev, new_ps, false);
+       if (ret) {
+               DRM_ERROR("si_enable_smc_cac failed\n");
+               return ret;
+       }
+       ret = si_halt_smc(adev);
+       if (ret) {
+               DRM_ERROR("si_halt_smc failed\n");
+               return ret;
+       }
+       ret = si_upload_sw_state(adev, new_ps);
+       if (ret) {
+               DRM_ERROR("si_upload_sw_state failed\n");
+               return ret;
+       }
+       ret = si_upload_smc_data(adev);
+       if (ret) {
+               DRM_ERROR("si_upload_smc_data failed\n");
+               return ret;
+       }
+       ret = si_upload_ulv_state(adev);
+       if (ret) {
+               DRM_ERROR("si_upload_ulv_state failed\n");
+               return ret;
+       }
+       if (eg_pi->dynamic_ac_timing) {
+               ret = si_upload_mc_reg_table(adev, new_ps);
+               if (ret) {
+                       DRM_ERROR("si_upload_mc_reg_table failed\n");
+                       return ret;
+               }
+       }
+       ret = si_program_memory_timing_parameters(adev, new_ps);
+       if (ret) {
+               DRM_ERROR("si_program_memory_timing_parameters failed\n");
+               return ret;
+       }
+       si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
+
+       ret = si_resume_smc(adev);
+       if (ret) {
+               DRM_ERROR("si_resume_smc failed\n");
+               return ret;
+       }
+       ret = si_set_sw_state(adev);
+       if (ret) {
+               DRM_ERROR("si_set_sw_state failed\n");
+               return ret;
+       }
+       ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
+       if (eg_pi->pcie_performance_request)
+               si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+       ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
+       if (ret) {
+               DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
+               return ret;
+       }
+       ret = si_enable_smc_cac(adev, new_ps, true);
+       if (ret) {
+               DRM_ERROR("si_enable_smc_cac failed\n");
+               return ret;
+       }
+       ret = si_enable_power_containment(adev, new_ps, true);
+       if (ret) {
+               DRM_ERROR("si_enable_power_containment failed\n");
+               return ret;
+       }
+
+       ret = si_power_control_set_level(adev);
+       if (ret) {
+               DRM_ERROR("si_power_control_set_level failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+
+       ni_update_current_ps(adev, new_ps);
+}
+
+#if 0
+void si_dpm_reset_asic(struct amdgpu_device *adev)
+{
+       si_restrict_performance_levels_before_switch(adev);
+       si_disable_ulv(adev);
+       si_set_boot_state(adev);
+}
+#endif
+
+static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+       si_program_display_gap(adev);
+}
+
+static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+                                         struct amdgpu_ps *rps,
+                                         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+                                         u8 table_rev)
+{
+       rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+       rps->class = le16_to_cpu(non_clock_info->usClassification);
+       rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+       if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+               rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+               rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+       } else if (r600_is_uvd_state(rps->class, rps->class2)) {
+               rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+               rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+       } else {
+               rps->vclk = 0;
+               rps->dclk = 0;
+       }
+
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+               adev->pm.dpm.boot_ps = rps;
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+               adev->pm.dpm.uvd_ps = rps;
+}
+
+static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
+                                     struct amdgpu_ps *rps, int index,
+                                     union pplib_clock_info *clock_info)
+{
+       struct rv7xx_power_info *pi = rv770_get_pi(adev);
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_power_info *si_pi = si_get_pi(adev);
+       struct si_ps *ps = si_get_ps(rps);
+       u16 leakage_voltage;
+       struct rv7xx_pl *pl = &ps->performance_levels[index];
+       int ret;
+
+       ps->performance_level_count = index + 1;
+
+       pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+       pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
+       pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+       pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
+
+       pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+       pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+       pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+       pl->pcie_gen = r600_get_pcie_gen_support(adev,
+                                                si_pi->sys_pcie_mask,
+                                                si_pi->boot_pcie_gen,
+                                                clock_info->si.ucPCIEGen);
+
+       /* patch up vddc if necessary */
+       ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
+                                                       &leakage_voltage);
+       if (ret == 0)
+               pl->vddc = leakage_voltage;
+
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+               pi->acpi_vddc = pl->vddc;
+               eg_pi->acpi_vddci = pl->vddci;
+               si_pi->acpi_pcie_gen = pl->pcie_gen;
+       }
+
+       if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+           index == 0) {
+               /* XXX disable for A0 tahiti */
+               si_pi->ulv.supported = false;
+               si_pi->ulv.pl = *pl;
+               si_pi->ulv.one_pcie_lane_in_ulv = false;
+               si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+               si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
+               si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
+       }
+
+       if (pi->min_vddc_in_table > pl->vddc)
+               pi->min_vddc_in_table = pl->vddc;
+
+       if (pi->max_vddc_in_table < pl->vddc)
+               pi->max_vddc_in_table = pl->vddc;
+
+       /* patch up boot state */
+       if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+               u16 vddc, vddci, mvdd;
+               amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
+               pl->mclk = adev->clock.default_mclk;
+               pl->sclk = adev->clock.default_sclk;
+               pl->vddc = vddc;
+               pl->vddci = vddci;
+               si_pi->mvdd_bootup_value = mvdd;
+       }
+
+       if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+           ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+       }
+}
+
+union pplib_power_state {
+       struct _ATOM_PPLIB_STATE v1;
+       struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
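+/* The VBIOS PowerPlay data is three parallel arrays (states, clock info,
+ * non-clock info) referenced by index; each v2 state record lists one
+ * clock-info index per DPM level.
+ */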
+static int si_parse_power_table(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+       union pplib_power_state *power_state;
+       int i, j, k, non_clock_array_index, clock_array_index;
+       union pplib_clock_info *clock_info;
+       struct _StateArray *state_array;
+       struct _ClockInfoArray *clock_info_array;
+       struct _NonClockInfoArray *non_clock_info_array;
+       union power_info *power_info;
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+       u16 data_offset;
+       u8 frev, crev;
+       u8 *power_state_offset;
+       struct si_ps *ps;
+
+       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset))
+               return -EINVAL;
+       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+       amdgpu_add_thermal_controller(adev);
+
+       state_array = (struct _StateArray *)
+               (mode_info->atom_context->bios + data_offset +
+                le16_to_cpu(power_info->pplib.usStateArrayOffset));
+       clock_info_array = (struct _ClockInfoArray *)
+               (mode_info->atom_context->bios + data_offset +
+                le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+       non_clock_info_array = (struct _NonClockInfoArray *)
+               (mode_info->atom_context->bios + data_offset +
+                le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+       adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                                 sizeof(struct amdgpu_ps), GFP_KERNEL);
+       if (!adev->pm.dpm.ps)
+               return -ENOMEM;
+       power_state_offset = (u8 *)state_array->states;
+       for (i = 0; i < state_array->ucNumEntries; i++) {
+               u8 *idx;
+               power_state = (union pplib_power_state *)power_state_offset;
+               non_clock_array_index = power_state->v2.nonClockInfoIndex;
+               non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+                       &non_clock_info_array->nonClockInfo[non_clock_array_index];
+               ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
+               if (ps == NULL) {
+                       kfree(adev->pm.dpm.ps);
+                       return -ENOMEM;
+               }
+               adev->pm.dpm.ps[i].ps_priv = ps;
+               si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+                                             non_clock_info,
+                                             non_clock_info_array->ucEntrySize);
+               k = 0;
+               idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+               for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+                       clock_array_index = idx[j];
+                       if (clock_array_index >= clock_info_array->ucNumEntries)
+                               continue;
+                       if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
+                               break;
+                       clock_info = (union pplib_clock_info *)
+                               ((u8 *)&clock_info_array->clockInfo[0] +
+                                (clock_array_index * clock_info_array->ucEntrySize));
+                       si_parse_pplib_clock_info(adev,
+                                                 &adev->pm.dpm.ps[i], k,
+                                                 clock_info);
+                       k++;
+               }
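+               /* v2 state records are variable length: a 2-byte header
+                * followed by one clock-info index byte per DPM level.
+                */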
+               power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+       }
+       adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+       /* fill in the vce power states */
+       for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+               u32 sclk, mclk;
+               clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+               clock_info = (union pplib_clock_info *)
+                       &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+               sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+               sclk |= clock_info->si.ucEngineClockHigh << 16;
+               mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+               mclk |= clock_info->si.ucMemoryClockHigh << 16;
+               adev->pm.dpm.vce_states[i].sclk = sclk;
+               adev->pm.dpm.vce_states[i].mclk = mclk;
+       }
+
+       return 0;
+}
+
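+/* si_power_info embeds the NI, Evergreen and RV7xx info structs
+ * (si_pi->ni.eg.rv7xx), so the rv770_/evergreen_/ni_ helpers shared with
+ * older ASICs can operate on the same allocation.
+ */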
+static int si_dpm_init(struct amdgpu_device *adev)
+{
+       struct rv7xx_power_info *pi;
+       struct evergreen_power_info *eg_pi;
+       struct ni_power_info *ni_pi;
+       struct si_power_info *si_pi;
+       struct atom_clock_dividers dividers;
+       int ret;
+       u32 mask;
+
+       si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+       if (si_pi == NULL)
+               return -ENOMEM;
+       adev->pm.dpm.priv = si_pi;
+       ni_pi = &si_pi->ni;
+       eg_pi = &ni_pi->eg;
+       pi = &eg_pi->rv7xx;
+
+       ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+       if (ret)
+               si_pi->sys_pcie_mask = 0;
+       else
+               si_pi->sys_pcie_mask = mask;
+       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+       si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+       si_set_max_cu_value(adev);
+
+       rv770_get_max_vddc(adev);
+       si_get_leakage_vddc(adev);
+       si_patch_dependency_tables_based_on_leakage(adev);
+
+       pi->acpi_vddc = 0;
+       eg_pi->acpi_vddci = 0;
+       pi->min_vddc_in_table = 0;
+       pi->max_vddc_in_table = 0;
+
+       ret = amdgpu_get_platform_caps(adev);
+       if (ret)
+               return ret;
+
+       ret = amdgpu_parse_extended_power_table(adev);
+       if (ret)
+               return ret;
+
+       ret = si_parse_power_table(adev);
+       if (ret)
+               return ret;
+
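+       /* Hardcoded dispclk/voltage fallback curve; the clocks appear to be in
+        * 10 kHz units and the voltages in mV, matching the other pplib tables.
+        */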
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+               kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+       if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+               amdgpu_free_extended_power_table(adev);
+               return -ENOMEM;
+       }
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+       adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+       if (adev->pm.dpm.voltage_response_time == 0)
+               adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+       if (adev->pm.dpm.backbias_response_time == 0)
+               adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+       ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+                                            0, false, &dividers);
+       /* fall back to the default reference divider if the query fails */
+       if (ret)
+               pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+       else
+               pi->ref_div = dividers.ref_div + 1;
+
+       eg_pi->smu_uvd_hs = false;
+
+       pi->mclk_strobe_mode_threshold = 40000;
+       if (si_is_special_1gb_platform(adev))
+               pi->mclk_stutter_mode_threshold = 0;
+       else
+               pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
+       pi->mclk_edc_enable_threshold = 40000;
+       eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+       ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+       pi->voltage_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                           VOLTAGE_OBJ_GPIO_LUT);
+       if (!pi->voltage_control) {
+               si_pi->voltage_control_svi2 =
+                       amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                                   VOLTAGE_OBJ_SVID2);
+               if (si_pi->voltage_control_svi2)
+                       amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                                 &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+       }
+
+       pi->mvdd_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+                                           VOLTAGE_OBJ_GPIO_LUT);
+
+       eg_pi->vddci_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+                                           VOLTAGE_OBJ_GPIO_LUT);
+       if (!eg_pi->vddci_control)
+               si_pi->vddci_control_svi2 =
+                       amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+                                                   VOLTAGE_OBJ_SVID2);
+
+       si_pi->vddc_phase_shed_control =
+               amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+                                           VOLTAGE_OBJ_PHASE_LUT);
+
+       rv770_get_engine_memory_ss(adev);
+
+       pi->asi = RV770_ASI_DFLT;
+       pi->pasi = CYPRESS_HASI_DFLT;
+       pi->vrc = SISLANDS_VRC_DFLT;
+
+       pi->gfx_clock_gating = true;
+
+       eg_pi->sclk_deep_sleep = true;
+       si_pi->sclk_deep_sleep_above_low = false;
+
+       if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+               pi->thermal_protection = true;
+       else
+               pi->thermal_protection = false;
+
+       eg_pi->dynamic_ac_timing = true;
+
+       eg_pi->light_sleep = true;
+#if defined(CONFIG_ACPI)
+       eg_pi->pcie_performance_request =
+               amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+       eg_pi->pcie_performance_request = false;
+#endif
+
+       si_pi->sram_end = SMC_RAM_END;
+
+       adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+       adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+       adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+       adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+       adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+       adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+       adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+       si_initialize_powertune_defaults(adev);
+
+       /* make sure dc limits are valid */
+       if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+           (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+                       adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+       si_pi->fan_ctrl_is_in_default_mode = true;
+
+       return 0;
+}
+
+static void si_dpm_fini(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (adev->pm.dpm.ps)
+               for (i = 0; i < adev->pm.dpm.num_ps; i++)
+                       kfree(adev->pm.dpm.ps[i].ps_priv);
+       kfree(adev->pm.dpm.ps);
+       kfree(adev->pm.dpm.priv);
+       kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+       amdgpu_free_extended_power_table(adev);
+}
+
+static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+                                                   struct seq_file *m)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct amdgpu_ps *rps = &eg_pi->current_rps;
+       struct si_ps *ps = si_get_ps(rps);
+       struct rv7xx_pl *pl;
+       u32 current_index =
+               (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+               CURRENT_STATE_INDEX_SHIFT;
+
+       if (current_index >= ps->performance_level_count) {
+               seq_printf(m, "invalid dpm profile %d\n", current_index);
+       } else {
+               pl = &ps->performance_levels[current_index];
+               seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+               seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+                          current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+       }
+}
+
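+/* THERM_INT_MASK_HIGH/LOW are mask bits: setting a bit suppresses the
+ * corresponding thermal interrupt, clearing it enables delivery.
+ */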
+static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
+                                     struct amdgpu_irq_src *source,
+                                     unsigned type,
+                                     enum amdgpu_interrupt_state state)
+{
+       u32 cg_thermal_int;
+
+       switch (type) {
+       case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int |= THERM_INT_MASK_HIGH;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int &= ~THERM_INT_MASK_HIGH;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               default:
+                       break;
+               }
+               break;
+
+       case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+               switch (state) {
+               case AMDGPU_IRQ_STATE_DISABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int |= THERM_INT_MASK_LOW;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               case AMDGPU_IRQ_STATE_ENABLE:
+                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+                       cg_thermal_int &= ~THERM_INT_MASK_LOW;
+                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+                       break;
+               default:
+                       break;
+               }
+               break;
+
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+                                   struct amdgpu_irq_src *source,
+                                   struct amdgpu_iv_entry *entry)
+{
+       bool queue_thermal = false;
+
+       if (entry == NULL)
+               return -EINVAL;
+
+       switch (entry->src_id) {
+       case 230: /* thermal low to high */
+               DRM_DEBUG("IH: thermal low to high\n");
+               adev->pm.dpm.thermal.high_to_low = false;
+               queue_thermal = true;
+               break;
+       case 231: /* thermal high to low */
+               DRM_DEBUG("IH: thermal high to low\n");
+               adev->pm.dpm.thermal.high_to_low = true;
+               queue_thermal = true;
+               break;
+       default:
+               break;
+       }
+
+       if (queue_thermal)
+               schedule_work(&adev->pm.dpm.thermal.work);
+
+       return 0;
+}
+
+static int si_dpm_late_init(void *handle)
+{
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!amdgpu_dpm)
+               return 0;
+
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
+
+       ret = si_set_temperature_range(adev);
+       if (ret)
+               return ret;
+#if 0 /* TODO? */
+       si_dpm_powergate_uvd(adev, true);
+#endif
+       return 0;
+}
+
+/**
+ * si_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dpm_init_microcode(struct amdgpu_device *adev)
+{
+       const char *chip_name;
+       char fw_name[30];
+       int err;
+
+       DRM_DEBUG("\n");
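+       /* Some later board revisions and device IDs ship a different SMC and
+        * need the "_k" firmware variant; the PCI revision/device lists below
+        * select the right image for each chip.
+        */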
+       switch (adev->asic_type) {
+       case CHIP_TAHITI:
+               chip_name = "tahiti";
+               break;
+       case CHIP_PITCAIRN:
+               if ((adev->pdev->revision == 0x81) ||
+                   (adev->pdev->device == 0x6810) ||
+                   (adev->pdev->device == 0x6811) ||
+                   (adev->pdev->device == 0x6816) ||
+                   (adev->pdev->device == 0x6817) ||
+                   (adev->pdev->device == 0x6806))
+                       chip_name = "pitcairn_k";
+               else
+                       chip_name = "pitcairn";
+               break;
+       case CHIP_VERDE:
+               if ((adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->revision == 0x87) ||
+                   (adev->pdev->device == 0x6820) ||
+                   (adev->pdev->device == 0x6821) ||
+                   (adev->pdev->device == 0x6822) ||
+                   (adev->pdev->device == 0x6823) ||
+                   (adev->pdev->device == 0x682A) ||
+                   (adev->pdev->device == 0x682B))
+                       chip_name = "verde_k";
+               else
+                       chip_name = "verde";
+               break;
+       case CHIP_OLAND:
+               if ((adev->pdev->revision == 0xC7) ||
+                   (adev->pdev->revision == 0x80) ||
+                   (adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->device == 0x6604) ||
+                   (adev->pdev->device == 0x6605))
+                       chip_name = "oland_k";
+               else
+                       chip_name = "oland";
+               break;
+       case CHIP_HAINAN:
+               if ((adev->pdev->revision == 0x81) ||
+                   (adev->pdev->revision == 0x83) ||
+                   (adev->pdev->revision == 0xC3) ||
+                   (adev->pdev->device == 0x6664) ||
+                   (adev->pdev->device == 0x6665) ||
+                   (adev->pdev->device == 0x6667))
+                       chip_name = "hainan_k";
+               else
+                       chip_name = "hainan";
+               break;
+       default:
+               BUG();
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+       err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+       if (err)
+               goto out;
+       err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+       if (err) {
+               DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
+                         err, fw_name);
+               release_firmware(adev->pm.fw);
+               adev->pm.fw = NULL;
+       }
+       return err;
+}
+
+static int si_dpm_sw_init(void *handle)
+{
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+       if (ret)
+               return ret;
+
+       ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+       if (ret)
+               return ret;
+
+       /* default to balanced state */
+       adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+       adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+       adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+       adev->pm.default_sclk = adev->clock.default_sclk;
+       adev->pm.default_mclk = adev->clock.default_mclk;
+       adev->pm.current_sclk = adev->clock.default_sclk;
+       adev->pm.current_mclk = adev->clock.default_mclk;
+       adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+       if (amdgpu_dpm == 0)
+               return 0;
+
+       ret = si_dpm_init_microcode(adev);
+       if (ret)
+               return ret;
+
+       INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+       mutex_lock(&adev->pm.mutex);
+       ret = si_dpm_init(adev);
+       if (ret)
+               goto dpm_failed;
+       adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+       if (amdgpu_dpm == 1)
+               amdgpu_pm_print_power_states(adev);
+       mutex_unlock(&adev->pm.mutex);
+       DRM_INFO("amdgpu: dpm initialized\n");
+
+       return 0;
+
+dpm_failed:
+       si_dpm_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+       DRM_ERROR("amdgpu: dpm initialization failed\n");
+       return ret;
+}
+
+static int si_dpm_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       mutex_lock(&adev->pm.mutex);
+       amdgpu_pm_sysfs_fini(adev);
+       si_dpm_fini(adev);
+       mutex_unlock(&adev->pm.mutex);
+
+       return 0;
+}
+
+static int si_dpm_hw_init(void *handle)
+{
+       int ret;
+
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (!amdgpu_dpm)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       si_dpm_setup_asic(adev);
+       ret = si_dpm_enable(adev);
+       if (ret)
+               adev->pm.dpm_enabled = false;
+       else
+               adev->pm.dpm_enabled = true;
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+static int si_dpm_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pm.dpm_enabled) {
+               mutex_lock(&adev->pm.mutex);
+               si_dpm_disable(adev);
+               mutex_unlock(&adev->pm.mutex);
+       }
+
+       return 0;
+}
+
+static int si_dpm_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pm.dpm_enabled) {
+               mutex_lock(&adev->pm.mutex);
+               /* disable dpm */
+               si_dpm_disable(adev);
+               /* reset the power state */
+               adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+               mutex_unlock(&adev->pm.mutex);
+       }
+       return 0;
+}
+
+static int si_dpm_resume(void *handle)
+{
+       int ret;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pm.dpm_enabled) {
+               /* asic init will reset to the boot state */
+               mutex_lock(&adev->pm.mutex);
+               si_dpm_setup_asic(adev);
+               ret = si_dpm_enable(adev);
+               if (ret)
+                       adev->pm.dpm_enabled = false;
+               else
+                       adev->pm.dpm_enabled = true;
+               mutex_unlock(&adev->pm.mutex);
+               if (adev->pm.dpm_enabled)
+                       amdgpu_pm_compute_clocks(adev);
+       }
+       return 0;
+}
+
+static bool si_dpm_is_idle(void *handle)
+{
+       /* XXX */
+       return true;
+}
+
+static int si_dpm_wait_for_idle(void *handle)
+{
+       /* XXX */
+       return 0;
+}
+
+static int si_dpm_soft_reset(void *handle)
+{
+       return 0;
+}
+
+static int si_dpm_set_clockgating_state(void *handle,
+                                       enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int si_dpm_set_powergating_state(void *handle,
+                                       enum amd_powergating_state state)
+{
+       return 0;
+}
+
+/* get temperature in millidegrees */
+static int si_dpm_get_temp(struct amdgpu_device *adev)
+{
+       u32 temp;
+       int actual_temp = 0;
+
+       temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+               CTF_TEMP_SHIFT;
+
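+       /* The temperature field is 9 bits plus an over-range flag (0x200);
+        * over-range readings are clamped to 255 degrees C before scaling to
+        * millidegrees.
+        */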
+       if (temp & 0x200)
+               actual_temp = 255;
+       else
+               actual_temp = temp & 0x1ff;
+
+       actual_temp *= 1000;
+
+       return actual_temp;
+}
+
+static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+       if (low)
+               return requested_state->performance_levels[0].sclk;
+       else
+               return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+       struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+       struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+       if (low)
+               return requested_state->performance_levels[0].mclk;
+       else
+               return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+static void si_dpm_print_power_state(struct amdgpu_device *adev,
+                                    struct amdgpu_ps *rps)
+{
+       struct si_ps *ps = si_get_ps(rps);
+       struct rv7xx_pl *pl;
+       int i;
+
+       amdgpu_dpm_print_class_info(rps->class, rps->class2);
+       amdgpu_dpm_print_cap_info(rps->caps);
+       DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+       for (i = 0; i < ps->performance_level_count; i++) {
+               pl = &ps->performance_levels[i];
+               if (adev->asic_type >= CHIP_TAHITI)
+                       DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+                                i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+               else
+                       DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
+                                i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+       }
+       amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static int si_dpm_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_dpm_set_dpm_funcs(adev);
+       si_dpm_set_irq_funcs(adev);
+       return 0;
+}
+
+const struct amd_ip_funcs si_dpm_ip_funcs = {
+       .name = "si_dpm",
+       .early_init = si_dpm_early_init,
+       .late_init = si_dpm_late_init,
+       .sw_init = si_dpm_sw_init,
+       .sw_fini = si_dpm_sw_fini,
+       .hw_init = si_dpm_hw_init,
+       .hw_fini = si_dpm_hw_fini,
+       .suspend = si_dpm_suspend,
+       .resume = si_dpm_resume,
+       .is_idle = si_dpm_is_idle,
+       .wait_for_idle = si_dpm_wait_for_idle,
+       .soft_reset = si_dpm_soft_reset,
+       .set_clockgating_state = si_dpm_set_clockgating_state,
+       .set_powergating_state = si_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+       .get_temperature = &si_dpm_get_temp,
+       .pre_set_power_state = &si_dpm_pre_set_power_state,
+       .set_power_state = &si_dpm_set_power_state,
+       .post_set_power_state = &si_dpm_post_set_power_state,
+       .display_configuration_changed = &si_dpm_display_configuration_changed,
+       .get_sclk = &si_dpm_get_sclk,
+       .get_mclk = &si_dpm_get_mclk,
+       .print_power_state = &si_dpm_print_power_state,
+       .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+       .force_performance_level = &si_dpm_force_performance_level,
+       .vblank_too_short = &si_dpm_vblank_too_short,
+       .set_fan_control_mode = &si_dpm_set_fan_control_mode,
+       .get_fan_control_mode = &si_dpm_get_fan_control_mode,
+       .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
+       .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+};
+
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+       if (adev->pm.funcs == NULL)
+               adev->pm.funcs = &si_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
+       .set = si_dpm_set_interrupt_state,
+       .process = si_dpm_process_interrupt,
+};
+
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+       adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+       adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
new file mode 100644 (file)
index 0000000..51ce21c
--- /dev/null
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SI_DPM_H__
+#define __SI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "sislands_smc.h"
+
+#define MC_CG_CONFIG                                    0x96f
+#define MC_ARB_CG                                       0x9fa
+#define                CG_ARB_REQ(x)                           ((x) << 0)
+#define                CG_ARB_REQ_MASK                         (0xff << 0)
+
+#define        MC_ARB_DRAM_TIMING_1                            0x9fc
+#define        MC_ARB_DRAM_TIMING_2                            0x9fd
+#define        MC_ARB_DRAM_TIMING_3                            0x9fe
+#define        MC_ARB_DRAM_TIMING2_1                           0x9ff
+#define        MC_ARB_DRAM_TIMING2_2                           0xa00
+#define        MC_ARB_DRAM_TIMING2_3                           0xa01
+
+#define MAX_NO_OF_MVDD_VALUES 2
+#define MAX_NO_VREG_STEPS 32
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+#define RV770_ASI_DFLT                                1000
+#define CYPRESS_HASI_DFLT                               400000
+#define PCIE_PERF_REQ_PECI_GEN1         2
+#define PCIE_PERF_REQ_PECI_GEN2         3
+#define PCIE_PERF_REQ_PECI_GEN3         4
+#define RV770_DEFAULT_VCLK_FREQ  53300 /* 10 khz */
+#define RV770_DEFAULT_DCLK_FREQ  40000 /* 10 khz */
+
+#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
+
+#define RV770_SMC_TABLE_ADDRESS 0xB000
+#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE    3
+
+#define SMC_STROBE_RATIO    0x0F
+#define SMC_STROBE_ENABLE   0x10
+
+#define SMC_MC_EDC_RD_FLAG  0x01
+#define SMC_MC_EDC_WR_FLAG  0x02
+#define SMC_MC_RTT_ENABLE   0x04
+#define SMC_MC_STUTTER_EN   0x08
+
+#define RV770_SMC_VOLTAGEMASK_VDDC 0
+#define RV770_SMC_VOLTAGEMASK_MVDD 1
+#define RV770_SMC_VOLTAGEMASK_VDDCI 2
+#define RV770_SMC_VOLTAGEMASK_MAX  4
+
+#define NISLANDS_SMC_STROBE_RATIO    0x0F
+#define NISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define NISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define NISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define NISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define NISLANDS_SMC_MC_STUTTER_EN   0x08
+
+#define NISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define NISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define NISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT               0
+#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT                  1
+#define SISLANDS_MCREGISTERTABLE_ULV_SLOT                   2
+#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT     3
+
+#define SISLANDS_LEAKAGE_INDEX0     0xff01
+#define SISLANDS_MAX_LEAKAGE_COUNT  4
+
+#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
+#define SISLANDS_INITIAL_STATE_ARB_INDEX    0
+#define SISLANDS_ACPI_STATE_ARB_INDEX       1
+#define SISLANDS_ULV_STATE_ARB_INDEX        2
+#define SISLANDS_DRIVER_STATE_ARB_INDEX     3
+
+#define SISLANDS_DPM2_MAX_PULSE_SKIP        256
+
+#define SISLANDS_DPM2_NEAR_TDP_DEC          10
+#define SISLANDS_DPM2_ABOVE_SAFE_INC        5
+#define SISLANDS_DPM2_BELOW_SAFE_INC        20
+
+#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT            80
+
+#define SISLANDS_DPM2_MAXPS_PERCENT_H                   99
+#define SISLANDS_DPM2_MAXPS_PERCENT_M                   99
+
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER                 0x3FFF
+#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER                 0x12
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA           0x15
+#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE                  0x1E
+#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO                 0xF
+
+#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN         10
+
+#define SISLANDS_VRC_DFLT                               0xC000B3
+#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT             1687
+#define SISLANDS_CGULVPARAMETER_DFLT                    0x00040035
+#define SISLANDS_CGULVCONTROL_DFLT                      0x1f007550
+
+#define SI_ASI_DFLT                                10000
+#define SI_BSP_DFLT                                0x41EB
+#define SI_BSU_DFLT                                0x2
+#define SI_AH_DFLT                                 5
+#define SI_RLP_DFLT                                25
+#define SI_RMP_DFLT                                65
+#define SI_LHP_DFLT                                40
+#define SI_LMP_DFLT                                15
+#define SI_TD_DFLT                                 0
+#define SI_UTC_DFLT_00                             0x24
+#define SI_UTC_DFLT_01                             0x22
+#define SI_UTC_DFLT_02                             0x22
+#define SI_UTC_DFLT_03                             0x22
+#define SI_UTC_DFLT_04                             0x22
+#define SI_UTC_DFLT_05                             0x22
+#define SI_UTC_DFLT_06                             0x22
+#define SI_UTC_DFLT_07                             0x22
+#define SI_UTC_DFLT_08                             0x22
+#define SI_UTC_DFLT_09                             0x22
+#define SI_UTC_DFLT_10                             0x22
+#define SI_UTC_DFLT_11                             0x22
+#define SI_UTC_DFLT_12                             0x22
+#define SI_UTC_DFLT_13                             0x22
+#define SI_UTC_DFLT_14                             0x22
+#define SI_DTC_DFLT_00                             0x24
+#define SI_DTC_DFLT_01                             0x22
+#define SI_DTC_DFLT_02                             0x22
+#define SI_DTC_DFLT_03                             0x22
+#define SI_DTC_DFLT_04                             0x22
+#define SI_DTC_DFLT_05                             0x22
+#define SI_DTC_DFLT_06                             0x22
+#define SI_DTC_DFLT_07                             0x22
+#define SI_DTC_DFLT_08                             0x22
+#define SI_DTC_DFLT_09                             0x22
+#define SI_DTC_DFLT_10                             0x22
+#define SI_DTC_DFLT_11                             0x22
+#define SI_DTC_DFLT_12                             0x22
+#define SI_DTC_DFLT_13                             0x22
+#define SI_DTC_DFLT_14                             0x22
+#define SI_VRC_DFLT                                0x0000C003
+#define SI_VOLTAGERESPONSETIME_DFLT                1000
+#define SI_BACKBIASRESPONSETIME_DFLT               1000
+#define SI_VRU_DFLT                                0x3
+#define SI_SPLLSTEPTIME_DFLT                       0x1000
+#define SI_SPLLSTEPUNIT_DFLT                       0x3
+#define SI_TPU_DFLT                                0
+#define SI_TPC_DFLT                                0x200
+#define SI_SSTU_DFLT                               0
+#define SI_SST_DFLT                                0x00C8
+#define SI_GICST_DFLT                              0x200
+#define SI_FCT_DFLT                                0x0400
+#define SI_FCTU_DFLT                               0
+#define SI_CTXCGTT3DRPHC_DFLT                      0x20
+#define SI_CTXCGTT3DRSDC_DFLT                      0x40
+#define SI_VDDC3DOORPHC_DFLT                       0x100
+#define SI_VDDC3DOORSDC_DFLT                       0x7
+#define SI_VDDC3DOORSU_DFLT                        0
+#define SI_MPLLLOCKTIME_DFLT                       100
+#define SI_MPLLRESETTIME_DFLT                      150
+#define SI_VCOSTEPPCT_DFLT                          20
+#define SI_ENDINGVCOSTEPPCT_DFLT                    5
+#define SI_REFERENCEDIVIDER_DFLT                    4
+
+#define SI_PM_NUMBER_OF_TC 15
+#define SI_PM_NUMBER_OF_SCLKS 20
+#define SI_PM_NUMBER_OF_MCLKS 4
+#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define SI_TEMP_RANGE_MIN (90 * 1000)
+#define SI_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum ni_dc_cac_level
+{
+       NISLANDS_DCCAC_LEVEL_0 = 0,
+       NISLANDS_DCCAC_LEVEL_1,
+       NISLANDS_DCCAC_LEVEL_2,
+       NISLANDS_DCCAC_LEVEL_3,
+       NISLANDS_DCCAC_LEVEL_4,
+       NISLANDS_DCCAC_LEVEL_5,
+       NISLANDS_DCCAC_LEVEL_6,
+       NISLANDS_DCCAC_LEVEL_7,
+       NISLANDS_DCCAC_MAX_LEVELS
+};
+
+enum si_cac_config_reg_type
+{
+       SISLANDS_CACCONFIG_MMR = 0,
+       SISLANDS_CACCONFIG_CGIND,
+       SISLANDS_CACCONFIG_MAX
+};
+
+enum si_power_level {
+       SI_POWER_LEVEL_LOW = 0,
+       SI_POWER_LEVEL_MEDIUM = 1,
+       SI_POWER_LEVEL_HIGH = 2,
+       SI_POWER_LEVEL_CTXSW = 3,
+};
+
+enum si_td {
+       SI_TD_AUTO,
+       SI_TD_UP,
+       SI_TD_DOWN,
+};
+
+enum si_display_watermark {
+       SI_DISPLAY_WATERMARK_LOW = 0,
+       SI_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum si_display_gap
+{
+    SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    SI_PM_DISPLAY_GAP_VBLANK       = 1,
+    SI_PM_DISPLAY_GAP_WATERMARK    = 2,
+    SI_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+
+extern const struct amd_ip_funcs si_dpm_ip_funcs;
+
+struct ni_leakage_coeffients
+{
+       u32 at;
+       u32 bt;
+       u32 av;
+       u32 bv;
+       s32 t_slope;
+       s32 t_intercept;
+       u32 t_ref;
+};
+
+struct SMC_Evergreen_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
+
+struct evergreen_mc_reg_entry {
+       u32 mclk_max;
+       u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_mc_reg_table {
+       u8 last;
+       u8 num_entries;
+       u16 valid_flag;
+       struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_Evergreen_MCRegisterSet
+{
+    uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
+
+struct SMC_Evergreen_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_Evergreen_MCRegisterAddress     address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+    SMC_Evergreen_MCRegisterSet         data[5];
+};
+
+typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+struct SMC_NIslands_MCRegisterSet
+{
+    uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
+
+struct ni_mc_reg_entry {
+       u32 mclk_max;
+       u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_NIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
+
+struct SMC_NIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_NIslands_MCRegisterAddress      address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_NIslands_MCRegisterSet          data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
+
+struct evergreen_ulv_param {
+       bool supported;
+       struct rv7xx_pl *pl;
+};
+
+struct evergreen_arb_registers {
+       u32 mc_arb_dram_timing;
+       u32 mc_arb_dram_timing2;
+       u32 mc_arb_rfsh_rate;
+       u32 mc_arb_burst_time;
+};
+
+struct at {
+       u32 rlp;
+       u32 rmp;
+       u32 lhp;
+       u32 lmp;
+};
+
+struct ni_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_func_cntl_4;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 mclk_pwrmgt_cntl;
+       u32 dll_cntl;
+       u32 mpll_ad_func_cntl;
+       u32 mpll_ad_func_cntl_2;
+       u32 mpll_dq_func_cntl;
+       u32 mpll_dq_func_cntl_2;
+       u32 mpll_ss1;
+       u32 mpll_ss2;
+};
+
+struct RV770_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
+
+struct RV770_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
+
+struct RV730_SMC_MCLK_VALUE
+{
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL2;
+    uint32_t        vMPLL_FUNC_CNTL3;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
+
+struct RV770_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
+
+union RV7XX_SMC_MCLK_VALUE
+{
+    RV770_SMC_MCLK_VALUE    mclk770;
+    RV730_SMC_MCLK_VALUE    mclk730;
+};
+
+typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
+
+struct RV770_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                 arbValue;
+    union{
+        uint8_t             seqValue;
+        uint8_t             ACIndex;
+    };
+    uint8_t                 displayWatermark;
+    uint8_t                 gen2PCIE;
+    uint8_t                 gen2XSP;
+    uint8_t                 backbias;
+    uint8_t                 strobeMode;
+    uint8_t                 mcFlags;
+    uint32_t                aT;
+    uint32_t                bSP;
+    RV770_SMC_SCLK_VALUE    sclk;
+    RV7XX_SMC_MCLK_VALUE    mclk;
+    RV770_SMC_VOLTAGE_VALUE vddc;
+    RV770_SMC_VOLTAGE_VALUE mvdd;
+    RV770_SMC_VOLTAGE_VALUE vddci;
+    uint8_t                 reserved1;
+    uint8_t                 reserved2;
+    uint8_t                 stateFlags;
+    uint8_t                 padding;
+};
+
+typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
+
+struct RV770_SMC_SWSTATE
+{
+    uint8_t           flags;
+    uint8_t           padding1;
+    uint8_t           padding2;
+    uint8_t           padding3;
+    RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
+
+struct RV770_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[RV770_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
+
+struct RV770_SMC_STATETABLE
+{
+    uint8_t             thermalProtectType;
+    uint8_t             systemFlags;
+    uint8_t             maxVDDCIndexInPPTable;
+    uint8_t             extraFlags;
+    uint8_t             highSMIO[MAX_NO_VREG_STEPS];
+    uint32_t            lowSMIO[MAX_NO_VREG_STEPS];
+    RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+    RV770_SMC_SWSTATE   initialState;
+    RV770_SMC_SWSTATE   ACPIState;
+    RV770_SMC_SWSTATE   driverState;
+    RV770_SMC_SWSTATE   ULVState;
+};
+
+typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
+
+struct vddc_table_entry {
+       u16 vddc;
+       u8 vddc_index;
+       u8 high_smio;
+       u32 low_smio;
+};
+
+struct rv770_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 mpll_ad_func_cntl;
+       u32 mpll_ad_func_cntl_2;
+       u32 mpll_dq_func_cntl;
+       u32 mpll_dq_func_cntl_2;
+       u32 mclk_pwrmgt_cntl;
+       u32 dll_cntl;
+       u32 mpll_ss1;
+       u32 mpll_ss2;
+};
+
+struct rv730_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 mclk_pwrmgt_cntl;
+       u32 dll_cntl;
+       u32 mpll_func_cntl;
+       u32 mpll_func_cntl2;
+       u32 mpll_func_cntl3;
+       u32 mpll_ss;
+       u32 mpll_ss2;
+};
+
+union r7xx_clock_registers {
+       struct rv770_clock_registers rv770;
+       struct rv730_clock_registers rv730;
+};
+
+struct rv7xx_power_info {
+       /* flags */
+       bool mem_gddr5;
+       bool pcie_gen2;
+       bool dynamic_pcie_gen2;
+       bool acpi_pcie_gen2;
+       bool boot_in_gen2;
+       bool voltage_control; /* vddc */
+       bool mvdd_control;
+       bool sclk_ss;
+       bool mclk_ss;
+       bool dynamic_ss;
+       bool gfx_clock_gating;
+       bool mg_clock_gating;
+       bool mgcgtssm;
+       bool power_gating;
+       bool thermal_protection;
+       bool display_gap;
+       bool dcodt;
+       bool ulps;
+       /* registers */
+       union r7xx_clock_registers clk_regs;
+       u32 s0_vid_lower_smio_cntl;
+       /* voltage */
+       u32 vddc_mask_low;
+       u32 mvdd_mask_low;
+       u32 mvdd_split_frequency;
+       u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
+       u16 max_vddc;
+       u16 max_vddc_in_table;
+       u16 min_vddc_in_table;
+       struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
+       u8 valid_vddc_entries;
+       /* dc odt */
+       u32 mclk_odt_threshold;
+       u8 odt_value_0[2];
+       u8 odt_value_1[2];
+       /* stored values */
+       u32 boot_sclk;
+       u16 acpi_vddc;
+       u32 ref_div;
+       u32 active_auto_throttle_sources;
+       u32 mclk_stutter_mode_threshold;
+       u32 mclk_strobe_mode_threshold;
+       u32 mclk_edc_enable_threshold;
+       u32 bsp;
+       u32 bsu;
+       u32 pbsp;
+       u32 pbsu;
+       u32 dsp;
+       u32 psp;
+       u32 asi;
+       u32 pasi;
+       u32 vrc;
+       u32 restricted_levels;
+       u32 rlp;
+       u32 rmp;
+       u32 lhp;
+       u32 lmp;
+       /* smc offsets */
+       u16 state_table_start;
+       u16 soft_regs_start;
+       u16 sram_end;
+       /* scratch structs */
+       RV770_SMC_STATETABLE smc_statetable;
+};
+
+struct rv7xx_pl {
+       u32 sclk;
+       u32 mclk;
+       u16 vddc;
+       u16 vddci; /* eg+ only */
+       u32 flags;
+       enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+};
+
+struct rv7xx_ps {
+       struct rv7xx_pl high;
+       struct rv7xx_pl medium;
+       struct rv7xx_pl low;
+       bool dc_compatible;
+};
+
+struct si_ps {
+       u16 performance_level_count;
+       bool dc_compatible;
+       struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+struct ni_mc_reg_table {
+       u8 last;
+       u8 num_entries;
+       u16 valid_flag;
+       struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ni_cac_data
+{
+       struct ni_leakage_coeffients leakage_coefficients;
+       u32 i_leakage;
+       s32 leakage_minimum_temperature;
+       u32 pwr_const;
+       u32 dc_cac_value;
+       u32 bif_cac_value;
+       u32 lkge_pwr;
+       u8 mc_wr_weight;
+       u8 mc_rd_weight;
+       u8 allow_ovrflw;
+       u8 num_win_tdp;
+       u8 l2num_win_tdp;
+       u8 lts_truncate_n;
+};
+
+struct evergreen_power_info {
+       /* must be first! */
+       struct rv7xx_power_info rv7xx;
+       /* flags */
+       bool vddci_control;
+       bool dynamic_ac_timing;
+       bool abm;
+       bool mcls;
+       bool light_sleep;
+       bool memory_transition;
+       bool pcie_performance_request;
+       bool pcie_performance_request_registered;
+       bool sclk_deep_sleep;
+       bool dll_default_on;
+       bool ls_clock_gating;
+       bool smu_uvd_hs;
+       bool uvd_enabled;
+       /* stored values */
+       u16 acpi_vddci;
+       u8 mvdd_high_index;
+       u8 mvdd_low_index;
+       u32 mclk_edc_wr_enable_threshold;
+       struct evergreen_mc_reg_table mc_reg_table;
+       struct atom_voltage_table vddc_voltage_table;
+       struct atom_voltage_table vddci_voltage_table;
+       struct evergreen_arb_registers bootup_arb_registers;
+       struct evergreen_ulv_param ulv;
+       struct at ats[2];
+       /* smc offsets */
+       u16 mc_reg_table_start;
+       struct amdgpu_ps current_rps;
+       struct rv7xx_ps current_ps;
+       struct amdgpu_ps requested_rps;
+       struct rv7xx_ps requested_ps;
+};
+
+struct PP_NIslands_Dpm2PerfLevel
+{
+    uint8_t     MaxPS;
+    uint8_t     TgtAct;
+    uint8_t     MaxPS_StepInc;
+    uint8_t     MaxPS_StepDec;
+    uint8_t     PSST;
+    uint8_t     NearTDPDec;
+    uint8_t     AboveSafeInc;
+    uint8_t     BelowSafeInc;
+    uint8_t     PSDeltaLimit;
+    uint8_t     PSDeltaWin;
+    uint8_t     Reserved[6];
+};
+
+typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
+
+struct PP_NIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+};
+typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
+
+struct NISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t        vCG_SPLL_FUNC_CNTL;
+    uint32_t        vCG_SPLL_FUNC_CNTL_2;
+    uint32_t        vCG_SPLL_FUNC_CNTL_3;
+    uint32_t        vCG_SPLL_FUNC_CNTL_4;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t        vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t        sclk_value;
+};
+
+typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
+
+struct NISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t        vMPLL_FUNC_CNTL;
+    uint32_t        vMPLL_FUNC_CNTL_1;
+    uint32_t        vMPLL_FUNC_CNTL_2;
+    uint32_t        vMPLL_AD_FUNC_CNTL;
+    uint32_t        vMPLL_AD_FUNC_CNTL_2;
+    uint32_t        vMPLL_DQ_FUNC_CNTL;
+    uint32_t        vMPLL_DQ_FUNC_CNTL_2;
+    uint32_t        vMCLK_PWRMGT_CNTL;
+    uint32_t        vDLL_CNTL;
+    uint32_t        vMPLL_SS;
+    uint32_t        vMPLL_SS2;
+    uint32_t        mclk_value;
+};
+
+typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
+
+struct NISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t             value;
+    uint8_t              index;
+    uint8_t              padding;
+};
+
+typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
+
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     arbValue;
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     reserved1;
+    uint8_t                     reserved2;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    NISLANDS_SMC_SCLK_VALUE     sclk;
+    NISLANDS_SMC_MCLK_VALUE     mclk;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    NISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    NISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    NISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint32_t                    powergate_en;
+    uint8_t                     hUp;
+    uint8_t                     hDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    reserved[2];
+    PP_NIslands_Dpm2PerfLevel   dpm2;
+};
+
+typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct NISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
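+    /* variable length: levelCount entries follow in SMC RAM */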
+};
+
+typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+
+struct NISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint8_t  highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+    uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define NISLANDS_MAX_NO_VREG_STEPS 32
+
+struct NISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint8_t                             highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    uint32_t                            lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+    NISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    PP_NIslands_DPM2Parameters          dpm2Params;
+    NISLANDS_SMC_SWSTATE                initialState;
+    NISLANDS_SMC_SWSTATE                ACPIState;
+    NISLANDS_SMC_SWSTATE                ULVState;
+    NISLANDS_SMC_SWSTATE                driverState;
+    NISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
+
+struct ni_power_info {
+       /* must be first! */
+       struct evergreen_power_info eg;
+       struct ni_clock_registers clock_registers;
+       struct ni_mc_reg_table mc_reg_table;
+       u32 mclk_rtt_mode_threshold;
+       /* flags */
+       bool use_power_boost_limit;
+       bool support_cac_long_term_average;
+       bool cac_enabled;
+       bool cac_configuration_required;
+       bool driver_calculate_cac_leakage;
+       bool pc_enabled;
+       bool enable_power_containment;
+       bool enable_cac;
+       bool enable_sq_ramping;
+       /* smc offsets */
+       u16 arb_table_start;
+       u16 fan_table_start;
+       u16 cac_table_start;
+       u16 spll_table_start;
+       /* CAC stuff */
+       struct ni_cac_data cac_data;
+       u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
+       const struct ni_cac_weights *cac_weights;
+       u8 lta_window_size;
+       u8 lts_truncate;
+       struct si_ps current_ps;
+       struct si_ps requested_ps;
+       /* scratch structs */
+       SMC_NIslands_MCRegisters smc_mc_reg_table;
+       NISLANDS_SMC_STATETABLE smc_statetable;
+};
+
+struct si_cac_config_reg
+{
+       u32 offset;
+       u32 mask;
+       u32 shift;
+       u32 value;
+       enum si_cac_config_reg_type type;
+};
+
+struct si_powertune_data
+{
+       u32 cac_window;
+       u32 l2_lta_window_size_default;
+       u8 lts_truncate_default;
+       u8 shift_n_default;
+       u8 operating_temp;
+       struct ni_leakage_coeffients leakage_coefficients;
+       u32 fixed_kt;
+       u32 lkge_lut_v0_percent;
+       u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+       bool enable_powertune_by_default;
+};
+
+struct si_dyn_powertune_data
+{
+       u32 cac_leakage;
+       s32 leakage_minimum_temperature;
+       u32 wintime;
+       u32 l2_lta_window_size;
+       u8 lts_truncate;
+       u8 shift_n;
+       u8 dc_pwr_value;
+       bool disable_uvd_powertune;
+};
+
+struct si_dte_data
+{
+       u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+       u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+       u32 k;
+       u32 t0;
+       u32 max_t;
+       u8 window_size;
+       u8 temp_select;
+       u8 dte_mode;
+       u8 tdep_count;
+       u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+       u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+       u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+       u32 t_threshold;
+       bool enable_dte_by_default;
+};
+
+struct si_clock_registers {
+       u32 cg_spll_func_cntl;
+       u32 cg_spll_func_cntl_2;
+       u32 cg_spll_func_cntl_3;
+       u32 cg_spll_func_cntl_4;
+       u32 cg_spll_spread_spectrum;
+       u32 cg_spll_spread_spectrum_2;
+       u32 dll_cntl;
+       u32 mclk_pwrmgt_cntl;
+       u32 mpll_ad_func_cntl;
+       u32 mpll_dq_func_cntl;
+       u32 mpll_func_cntl;
+       u32 mpll_func_cntl_1;
+       u32 mpll_func_cntl_2;
+       u32 mpll_ss1;
+       u32 mpll_ss2;
+};
+
+struct si_mc_reg_entry {
+       u32 mclk_max;
+       u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_mc_reg_table {
+       u8 last;
+       u8 num_entries;
+       u16 valid_flag;
+       struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+       SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_leakage_voltage_entry
+{
+       u16 voltage;
+       u16 leakage_index;
+};
+
+struct si_leakage_voltage
+{
+       u16 count;
+       struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct si_ulv_param {
+       bool supported;
+       u32 cg_ulv_control;
+       u32 cg_ulv_parameter;
+       u32 volt_change_delay;
+       struct rv7xx_pl pl;
+       bool one_pcie_lane_in_ulv;
+};
+
+struct si_power_info {
+       /* must be first! */
+       struct ni_power_info ni;
+       struct si_clock_registers clock_registers;
+       struct si_mc_reg_table mc_reg_table;
+       struct atom_voltage_table mvdd_voltage_table;
+       struct atom_voltage_table vddc_phase_shed_table;
+       struct si_leakage_voltage leakage_voltage;
+       u16 mvdd_bootup_value;
+       struct si_ulv_param ulv;
+       u32 max_cu;
+       /* pcie gen */
+       enum amdgpu_pcie_gen force_pcie_gen;
+       enum amdgpu_pcie_gen boot_pcie_gen;
+       enum amdgpu_pcie_gen acpi_pcie_gen;
+       u32 sys_pcie_mask;
+       /* flags */
+       bool enable_dte;
+       bool enable_ppm;
+       bool vddc_phase_shed_control;
+       bool pspp_notify_required;
+       bool sclk_deep_sleep_above_low;
+       bool voltage_control_svi2;
+       bool vddci_control_svi2;
+       /* smc offsets */
+       u32 sram_end;
+       u32 state_table_start;
+       u32 soft_regs_start;
+       u32 mc_reg_table_start;
+       u32 arb_table_start;
+       u32 cac_table_start;
+       u32 dte_table_start;
+       u32 spll_table_start;
+       u32 papm_cfg_table_start;
+       u32 fan_table_start;
+       /* CAC stuff */
+       const struct si_cac_config_reg *cac_weights;
+       const struct si_cac_config_reg *lcac_config;
+       const struct si_cac_config_reg *cac_override;
+       const struct si_powertune_data *powertune_data;
+       struct si_dyn_powertune_data dyn_powertune_data;
+       /* DTE stuff */
+       struct si_dte_data dte_data;
+       /* scratch structs */
+       SMC_SIslands_MCRegisters smc_mc_reg_table;
+       SISLANDS_SMC_STATETABLE smc_statetable;
+       PP_SIslands_PAPMParameters papm_parm;
+       /* SVI2 */
+       u8 svd_gpio_id;
+       u8 svc_gpio_id;
+       /* fan control */
+       bool fan_ctrl_is_in_default_mode;
+       u32 t_min;
+       u32 fan_ctrl_default_mode;
+       bool fan_is_controlled_by_smc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
new file mode 100644 (file)
index 0000000..8fae3d4
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "si/sid.h"
+#include "si_ih.h"
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+static void si_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_cntl = RREG32(IH_CNTL);
+       u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+       ih_cntl |= ENABLE_INTR;
+       ih_rb_cntl |= IH_RB_ENABLE;
+       WREG32(IH_CNTL, ih_cntl);
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       adev->irq.ih.enabled = true;
+}
+
+static void si_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+       u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+       u32 ih_cntl = RREG32(IH_CNTL);
+
+       ih_rb_cntl &= ~IH_RB_ENABLE;
+       ih_cntl &= ~ENABLE_INTR;
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       WREG32(IH_CNTL, ih_cntl);
+       WREG32(IH_RB_RPTR, 0);
+       WREG32(IH_RB_WPTR, 0);
+       adev->irq.ih.enabled = false;
+       adev->irq.ih.rptr = 0;
+}
+
+static int si_ih_irq_init(struct amdgpu_device *adev)
+{
+       int rb_bufsz;
+       u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+       u64 wptr_off;
+
+       si_ih_disable_interrupts(adev);
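+       /* set the dummy read address to the IH ring address */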
+       WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+       interrupt_cntl = RREG32(INTERRUPT_CNTL);
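+       /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+        * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+        */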
+       interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
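+       /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */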
+       interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+       WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+       WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
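+       /* the ring size is programmed as log2 of the size in dwords */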
+       rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
+       ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
+                    IH_WPTR_OVERFLOW_CLEAR |
+                    (rb_bufsz << 1) |
+                    IH_WPTR_WRITEBACK_ENABLE;
+
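+       /* tell the hardware where to write back the wptr for the CPU to poll */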
+       wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+       WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+       WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+       WREG32(IH_RB_CNTL, ih_rb_cntl);
+       WREG32(IH_RB_RPTR, 0);
+       WREG32(IH_RB_WPTR, 0);
+
+       ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+       if (adev->irq.msi_enabled)
+               ih_cntl |= RPTR_REARM;
+       WREG32(IH_CNTL, ih_cntl);
+
+       pci_set_master(adev->pdev);
+       si_ih_enable_interrupts(adev);
+
+       return 0;
+}
+
+static void si_ih_irq_disable(struct amdgpu_device *adev)
+{
+       si_ih_disable_interrupts(adev);
+       mdelay(1);
+}
+
+static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+{
+       u32 wptr, tmp;
+
+       wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+       if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
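+               /* on an overflow, resume from the last vector that was not
+                * overwritten (wptr + 16) so processing can catch up
+                */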
+               wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+               dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                       wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+               adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+               tmp = RREG32(IH_RB_CNTL);
+               tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+               WREG32(IH_RB_CNTL, tmp);
+       }
+       return (wptr & adev->irq.ih.ptr_mask);
+}
+
+static void si_ih_decode_iv(struct amdgpu_device *adev,
+                            struct amdgpu_iv_entry *entry)
+{
+       u32 ring_index = adev->irq.ih.rptr >> 2;
+       uint32_t dw[4];
+
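+       /* each IV ring entry is 16 bytes (4 dwords) */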
+       dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+       dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+       dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+       dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+       entry->src_id = dw[0] & 0xff;
+       entry->src_data = dw[1] & 0xfffffff;
+       entry->ring_id = dw[2] & 0xff;
+       entry->vm_id = (dw[2] >> 8) & 0xff;
+
+       adev->irq.ih.rptr += 16;
+}
+
+static void si_ih_set_rptr(struct amdgpu_device *adev)
+{
+       WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int si_ih_early_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_ih_set_interrupt_funcs(adev);
+
+       return 0;
+}
+
+static int si_ih_sw_init(void *handle)
+{
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
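+       /* 64 KiB IH ring; false selects a GTT-backed ring rather than a
+        * bus-addressable allocation
+        */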
+       r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+       if (r)
+               return r;
+
+       return amdgpu_irq_init(adev);
+}
+
+static int si_ih_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_irq_fini(adev);
+       amdgpu_ih_ring_fini(adev);
+
+       return 0;
+}
+
+static int si_ih_hw_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_ih_irq_init(adev);
+}
+
+static int si_ih_hw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       si_ih_irq_disable(adev);
+
+       return 0;
+}
+
+static int si_ih_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_ih_hw_fini(adev);
+}
+
+static int si_ih_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       return si_ih_hw_init(adev);
+}
+
+static bool si_ih_is_idle(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               return false;
+
+       return true;
+}
+
+static int si_ih_wait_for_idle(void *handle)
+{
+       unsigned i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               if (si_ih_is_idle(handle))
+                       return 0;
+               udelay(1);
+       }
+       return -ETIMEDOUT;
+}
+
+static int si_ih_soft_reset(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       u32 srbm_soft_reset = 0;
+       u32 tmp = RREG32(SRBM_STATUS);
+
+       if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+               srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+       if (srbm_soft_reset) {
+               tmp = RREG32(SRBM_SOFT_RESET);
+               tmp |= srbm_soft_reset;
+               dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+
+               tmp &= ~srbm_soft_reset;
+               WREG32(SRBM_SOFT_RESET, tmp);
+               tmp = RREG32(SRBM_SOFT_RESET);
+
+               udelay(50);
+       }
+
+       return 0;
+}
+
+static int si_ih_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int si_ih_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+const struct amd_ip_funcs si_ih_ip_funcs = {
+       .name = "si_ih",
+       .early_init = si_ih_early_init,
+       .late_init = NULL,
+       .sw_init = si_ih_sw_init,
+       .sw_fini = si_ih_sw_fini,
+       .hw_init = si_ih_hw_init,
+       .hw_fini = si_ih_hw_fini,
+       .suspend = si_ih_suspend,
+       .resume = si_ih_resume,
+       .is_idle = si_ih_is_idle,
+       .wait_for_idle = si_ih_wait_for_idle,
+       .soft_reset = si_ih_soft_reset,
+       .set_clockgating_state = si_ih_set_clockgating_state,
+       .set_powergating_state = si_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs si_ih_funcs = {
+       .get_wptr = si_ih_get_wptr,
+       .decode_iv = si_ih_decode_iv,
+       .set_rptr = si_ih_set_rptr
+};
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+       if (adev->irq.ih_funcs == NULL)
+               adev->irq.ih_funcs = &si_ih_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
new file mode 100644 (file)
index 0000000..f3e3a95
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_IH_H__
+#define __SI_IH_H__
+
+extern const struct amd_ip_funcs si_ih_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
new file mode 100644 (file)
index 0000000..668ba99
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "si/sid.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "sislands_smc.h"
+
+static int si_set_smc_sram_address(struct amdgpu_device *adev,
+                                  u32 smc_address, u32 limit)
+{
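+       /* the index/data pair addresses SMC SRAM one dword at a time, so the
+        * address must be dword aligned and the whole dword must fit below limit
+        */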
+       if (smc_address & 3)
+               return -EINVAL;
+       if ((smc_address + 3) > limit)
+               return -EINVAL;
+
+       WREG32(SMC_IND_INDEX_0, smc_address);
+       WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+       return 0;
+}
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+                               u32 smc_start_address,
+                               const u8 *src, u32 byte_count, u32 limit)
+{
+       unsigned long flags;
+       int ret = 0;
+       u32 data, original_data, addr, extra_shift;
+
+       if (smc_start_address & 3)
+               return -EINVAL;
+       if ((smc_start_address + byte_count) > limit)
+               return -EINVAL;
+
+       addr = smc_start_address;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       while (byte_count >= 4) {
+               /* SMC address space is BE */
+               data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+               ret = si_set_smc_sram_address(adev, addr, limit);
+               if (ret)
+                       goto done;
+
+               WREG32(SMC_IND_DATA_0, data);
+
+               src += 4;
+               byte_count -= 4;
+               addr += 4;
+       }
+
+       /* RMW for the final bytes */
+       if (byte_count > 0) {
+               data = 0;
+
+               ret = si_set_smc_sram_address(adev, addr, limit);
+               if (ret)
+                       goto done;
+
+               original_data = RREG32(SMC_IND_DATA_0);
+               extra_shift = 8 * (4 - byte_count);
+
+               while (byte_count > 0) {
+                       /* SMC address space is BE */
+                       data = (data << 8) + *src++;
+                       byte_count--;
+               }
+
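+               /* left-justify the new bytes (SMC is big endian) and merge in
+                * the low bytes of the original dword that must be preserved
+                */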
+               data <<= extra_shift;
+               data |= (original_data & ~((~0UL) << extra_shift));
+
+               ret = si_set_smc_sram_address(adev, addr, limit);
+               if (ret)
+                       goto done;
+
+               WREG32(SMC_IND_DATA_0, data);
+       }
+
+done:
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return ret;
+}
+
+void amdgpu_si_start_smc(struct amdgpu_device *adev)
+{
+       u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+       tmp &= ~RST_REG;
+
+       WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_si_reset_smc(struct amdgpu_device *adev)
+{
+       u32 tmp;
+
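+       /* dummy reads, presumably to flush outstanding writes before
+        * asserting the SMC reset
+        */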
+       RREG32(CB_CGTT_SCLK_CTRL);
+       RREG32(CB_CGTT_SCLK_CTRL);
+       RREG32(CB_CGTT_SCLK_CTRL);
+       RREG32(CB_CGTT_SCLK_CTRL);
+
+       tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
+             RST_REG;
+       WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
+{
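+       /* small bootstrap written at SMC address 0; presumably a jump to the
+        * firmware entry point
+        */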
+       static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+
+       return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
+{
+       u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+       if (enable)
+               tmp &= ~CK_DISABLE;
+       else
+               tmp |= CK_DISABLE;
+
+       WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
+{
+       u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+       u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+       if (!(rst & RST_REG) && !(clk & CK_DISABLE))
+               return true;
+
+       return false;
+}
+
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
+                                      PPSMC_Msg msg)
+{
+       u32 tmp;
+       int i;
+
+       if (!amdgpu_si_is_smc_running(adev))
+               return PPSMC_Result_Failed;
+
+       WREG32(SMC_MESSAGE_0, msg);
+
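+       /* poll until the SMC posts a non-zero response */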
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SMC_RESP_0);
+               if (tmp != 0)
+                       break;
+               udelay(1);
+       }
+
+       return (PPSMC_Result)RREG32(SMC_RESP_0);
+}
+
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       int i;
+
+       if (!amdgpu_si_is_smc_running(adev))
+               return PPSMC_Result_OK;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+               if ((tmp & CKEN) == 0)
+                       break;
+               udelay(1);
+       }
+
+       return PPSMC_Result_OK;
+}
+
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+       const struct smc_firmware_header_v1_0 *hdr;
+       unsigned long flags;
+       u32 ucode_start_address;
+       u32 ucode_size;
+       const u8 *src;
+       u32 data;
+
+       if (!adev->pm.fw)
+               return -EINVAL;
+
+       hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+
+       amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+       adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+       ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+       ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+       src = (const u8 *)
+               (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+       if (ucode_size & 3)
+               return -EINVAL;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(SMC_IND_INDEX_0, ucode_start_address);
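+       /* enable auto-increment so each data write advances the index */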
+       WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+       while (ucode_size >= 4) {
+               /* SMC address space is BE */
+               data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+               WREG32(SMC_IND_DATA_0, data);
+
+               src += 4;
+               ucode_size -= 4;
+       }
+       WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return 0;
+}
+
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                 u32 *value, u32 limit)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       ret = si_set_smc_sram_address(adev, smc_address, limit);
+       if (ret == 0)
+               *value = RREG32(SMC_IND_DATA_0);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return ret;
+}
+
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                  u32 value, u32 limit)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       ret = si_set_smc_sram_address(adev, smc_address, limit);
+       if (ret == 0)
+               WREG32(SMC_IND_DATA_0, value);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
new file mode 100644 (file)
index 0000000..ee4b846
--- /dev/null
@@ -0,0 +1,422 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SISLANDS_SMC_H
+#define PP_SISLANDS_SMC_H
+
+#include "ppsmc.h"
+
+#pragma pack(push, 1)
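+/* SMC tables are byte-packed; layouts must match the firmware exactly */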
+
+#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_SIslands_Dpm2PerfLevel
+{
+    uint8_t MaxPS;
+    uint8_t TgtAct;
+    uint8_t MaxPS_StepInc;
+    uint8_t MaxPS_StepDec;
+    uint8_t PSSamplingTime;
+    uint8_t NearTDPDec;
+    uint8_t AboveSafeInc;
+    uint8_t BelowSafeInc;
+    uint8_t PSDeltaLimit;
+    uint8_t PSDeltaWin;
+    uint16_t PwrEfficiencyRatio;
+    uint8_t Reserved[4];
+};
+
+typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
+
+struct PP_SIslands_DPM2Status
+{
+    uint32_t    dpm2Flags;
+    uint8_t     CurrPSkip;
+    uint8_t     CurrPSkipPowerShift;
+    uint8_t     CurrPSkipTDP;
+    uint8_t     CurrPSkipOCP;
+    uint8_t     MaxSPLLIndex;
+    uint8_t     MinSPLLIndex;
+    uint8_t     CurrSPLLIndex;
+    uint8_t     InfSweepMode;
+    uint8_t     InfSweepDir;
+    uint8_t     TDPexceeded;
+    uint8_t     reserved;
+    uint8_t     SwitchDownThreshold;
+    uint32_t    SwitchDownCounter;
+    uint32_t    SysScalingFactor;
+};
+
+typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
+
+struct PP_SIslands_DPM2Parameters
+{
+    uint32_t    TDPLimit;
+    uint32_t    NearTDPLimit;
+    uint32_t    SafePowerLimit;
+    uint32_t    PowerBoostLimit;
+    uint32_t    MinLimitDelta;
+};
+typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
+
+struct PP_SIslands_PAPMStatus
+{
+    uint32_t    EstimatedDGPU_T;
+    uint32_t    EstimatedDGPU_P;
+    uint32_t    EstimatedAPU_T;
+    uint32_t    EstimatedAPU_P;
+    uint8_t     dGPU_T_Limit_Exceeded;
+    uint8_t     reserved[3];
+};
+typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
+
+struct PP_SIslands_PAPMParameters
+{
+    uint32_t    NearTDPLimitTherm;
+    uint32_t    NearTDPLimitPAPM;
+    uint32_t    PlatformPowerLimit;
+    uint32_t    dGPU_T_Limit;
+    uint32_t    dGPU_T_Warning;
+    uint32_t    dGPU_T_Hysteresis;
+};
+typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
+
+struct SISLANDS_SMC_SCLK_VALUE
+{
+    uint32_t    vCG_SPLL_FUNC_CNTL;
+    uint32_t    vCG_SPLL_FUNC_CNTL_2;
+    uint32_t    vCG_SPLL_FUNC_CNTL_3;
+    uint32_t    vCG_SPLL_FUNC_CNTL_4;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM;
+    uint32_t    vCG_SPLL_SPREAD_SPECTRUM_2;
+    uint32_t    sclk_value;
+};
+
+typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
+
+struct SISLANDS_SMC_MCLK_VALUE
+{
+    uint32_t    vMPLL_FUNC_CNTL;
+    uint32_t    vMPLL_FUNC_CNTL_1;
+    uint32_t    vMPLL_FUNC_CNTL_2;
+    uint32_t    vMPLL_AD_FUNC_CNTL;
+    uint32_t    vMPLL_DQ_FUNC_CNTL;
+    uint32_t    vMCLK_PWRMGT_CNTL;
+    uint32_t    vDLL_CNTL;
+    uint32_t    vMPLL_SS;
+    uint32_t    vMPLL_SS2;
+    uint32_t    mclk_value;
+};
+
+typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
+
+struct SISLANDS_SMC_VOLTAGE_VALUE
+{
+    uint16_t    value;
+    uint8_t     index;
+    uint8_t     phase_settings;
+};
+
+typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
+
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+    uint8_t                     ACIndex;
+    uint8_t                     displayWatermark;
+    uint8_t                     gen2PCIE;
+    uint8_t                     UVDWatermark;
+    uint8_t                     VCEWatermark;
+    uint8_t                     strobeMode;
+    uint8_t                     mcFlags;
+    uint8_t                     padding;
+    uint32_t                    aT;
+    uint32_t                    bSP;
+    SISLANDS_SMC_SCLK_VALUE     sclk;
+    SISLANDS_SMC_MCLK_VALUE     mclk;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  mvdd;
+    SISLANDS_SMC_VOLTAGE_VALUE  vddci;
+    SISLANDS_SMC_VOLTAGE_VALUE  std_vddc;
+    uint8_t                     hysteresisUp;
+    uint8_t                     hysteresisDown;
+    uint8_t                     stateFlags;
+    uint8_t                     arbRefreshState;
+    uint32_t                    SQPowerThrottle;
+    uint32_t                    SQPowerThrottle_2;
+    uint32_t                    MaxPoweredUpCU;
+    SISLANDS_SMC_VOLTAGE_VALUE  high_temp_vddc;
+    SISLANDS_SMC_VOLTAGE_VALUE  low_temp_vddc;
+    uint32_t                    reserved[2];
+    PP_SIslands_Dpm2PerfLevel   dpm2;
+};
+
+#define SISLANDS_SMC_STROBE_RATIO    0x0F
+#define SISLANDS_SMC_STROBE_ENABLE   0x10
+
+#define SISLANDS_SMC_MC_EDC_RD_FLAG  0x01
+#define SISLANDS_SMC_MC_EDC_WR_FLAG  0x02
+#define SISLANDS_SMC_MC_RTT_ENABLE   0x04
+#define SISLANDS_SMC_MC_STUTTER_EN   0x08
+#define SISLANDS_SMC_MC_PG_EN        0x10
+
+typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct SISLANDS_SMC_SWSTATE
+{
+    uint8_t                             flags;
+    uint8_t                             levelCount;
+    uint8_t                             padding2;
+    uint8_t                             padding3;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
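+    /* variable length: levelCount entries follow in SMC RAM */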
+};
+
+typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC  0
+#define SISLANDS_SMC_VOLTAGEMASK_MVDD  1
+#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_MAX   4
+
+struct SISLANDS_SMC_VOLTAGEMASKTABLE
+{
+    uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define SISLANDS_MAX_NO_VREG_STEPS 32
+
+struct SISLANDS_SMC_STATETABLE
+{
+    uint8_t                             thermalProtectType;
+    uint8_t                             systemFlags;
+    uint8_t                             maxVDDCIndexInPPTable;
+    uint8_t                             extraFlags;
+    uint32_t                            lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+    SISLANDS_SMC_VOLTAGEMASKTABLE       voltageMaskTable;
+    SISLANDS_SMC_VOLTAGEMASKTABLE       phaseMaskTable;
+    PP_SIslands_DPM2Parameters          dpm2Params;
+    SISLANDS_SMC_SWSTATE                initialState;
+    SISLANDS_SMC_SWSTATE                ACPIState;
+    SISLANDS_SMC_SWSTATE                ULVState;
+    SISLANDS_SMC_SWSTATE                driverState;
+    SISLANDS_SMC_HW_PERFORMANCE_LEVEL   dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
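+    /* presumably overflow storage for driverState.levels[] above */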
+};
+
+typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
+
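+/* byte offsets of driver-visible "soft registers" in SMC RAM */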
+#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout         0x0
+#define SI_SMC_SOFT_REGISTER_delay_vreg               0xC
+#define SI_SMC_SOFT_REGISTER_delay_acpi               0x28
+#define SI_SMC_SOFT_REGISTER_seq_index                0x5C
+#define SI_SMC_SOFT_REGISTER_mvdd_chg_time            0x60
+#define SI_SMC_SOFT_REGISTER_mclk_switch_lim          0x70
+#define SI_SMC_SOFT_REGISTER_watermark_threshold      0x78
+#define SI_SMC_SOFT_REGISTER_phase_shedding_delay     0x88
+#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay    0x8C
+#define SI_SMC_SOFT_REGISTER_mc_block_delay           0x98
+#define SI_SMC_SOFT_REGISTER_ticks_per_us             0xA8
+#define SI_SMC_SOFT_REGISTER_crtc_index               0xC4
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
+#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width  0xF4
+#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen   0xFC
+#define SI_SMC_SOFT_REGISTER_vr_hot_gpio              0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type     0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd   0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc   0x120
+
+struct PP_SIslands_FanTable
+{
+       uint8_t  fdo_mode;
+       uint8_t  padding;
+       int16_t  temp_min;
+       int16_t  temp_med;
+       int16_t  temp_max;
+       int16_t  slope1;
+       int16_t  slope2;
+       int16_t  fdo_min;
+       int16_t  hys_up;
+       int16_t  hys_down;
+       int16_t  hys_slope;
+       int16_t  temp_resp_lim;
+       int16_t  temp_curr;
+       int16_t  slope_curr;
+       int16_t  pwm_curr;
+       uint32_t refresh_period;
+       int16_t  fdo_max;
+       uint8_t  temp_src;
+       int8_t  padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
+
+#define SMC_SISLANDS_SCALE_I  7
+#define SMC_SISLANDS_SCALE_R 12
+
+struct PP_SIslands_CacConfig
+{
+    uint16_t   cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+    uint32_t   lkge_lut_V0;
+    uint32_t   lkge_lut_Vstep;
+    uint32_t   WinTime;
+    uint32_t   R_LL;
+    uint32_t   calculation_repeats;
+    uint32_t   l2numWin_TDP;
+    uint32_t   dc_cac;
+    uint8_t    lts_truncate_n;
+    uint8_t    SHIFT_N;
+    uint8_t    log2_PG_LKG_SCALE;
+    uint8_t    cac_temp;
+    uint32_t   lkge_lut_T0;
+    uint32_t   lkge_lut_Tstep;
+};
+
+typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
+
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_SIslands_MCRegisterAddress
+{
+    uint16_t s0;
+    uint16_t s1;
+};
+
+typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
+
+struct SMC_SIslands_MCRegisterSet
+{
+    uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
+
+struct SMC_SIslands_MCRegisters
+{
+    uint8_t                             last;
+    uint8_t                             reserved[3];
+    SMC_SIslands_MCRegisterAddress      address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+    SMC_SIslands_MCRegisterSet          data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
+
+struct SMC_SIslands_MCArbDramTimingRegisterSet
+{
+    uint32_t mc_arb_dram_timing;
+    uint32_t mc_arb_dram_timing2;
+    uint8_t  mc_arb_rfsh_rate;
+    uint8_t  mc_arb_burst_time;
+    uint8_t  padding[2];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_SIslands_MCArbDramTimingRegisters
+{
+    uint8_t                                     arb_current;
+    uint8_t                                     reserved[3];
+    SMC_SIslands_MCArbDramTimingRegisterSet     data[16];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
+
+struct SMC_SISLANDS_SPLL_DIV_TABLE
+{
+    uint32_t    freq[256];
+    uint32_t    ss[256];
+};
+
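+/* bitfield layout of the packed freq[] and ss[] entries above */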
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK  0x01ffffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK   0xfe000000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT  25
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK   0x000fffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT  0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK   0xfff00000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT  20
+
+typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
+
+#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
+
+#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
+
+struct Smc_SIslands_DTE_Configuration
+{
+    uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+    uint32_t K;
+    uint32_t T0;
+    uint32_t MaxT;
+    uint8_t  WindowSize;
+    uint8_t  Tdep_count;
+    uint8_t  temp_select;
+    uint8_t  DTE_mode;
+    uint8_t  T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+    uint32_t Tthreshold;
+};
+
+typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
+
+#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
+
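+/* byte offsets of the table pointers within the firmware header */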
+#define SISLANDS_SMC_FIRMWARE_HEADER_version                   0x0
+#define SISLANDS_SMC_FIRMWARE_HEADER_flags                     0x4
+#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters             0xC
+#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable                0x10
+#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable                  0x14
+#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable            0x18
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable           0x24
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
+#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable                 0x38
+#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration          0x40
+#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters            0x48
+
+#pragma pack(pop)
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+                               u32 smc_start_address,
+                               const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_si_start_smc(struct amdgpu_device *adev);
+void amdgpu_si_reset_smc(struct amdgpu_device *adev);
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                 u32 *value, u32 limit);
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+                                  u32 value, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 10c0407..f6c9415 100644 (file)
@@ -526,6 +526,20 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               4; /* uvd_v4_2_ring_emit_ib */
+}
+
+static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v4_2_ring_emit_hdp_flush */
+               2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+               14; /* uvd_v4_2_ring_emit_fence  x1 no user fence */
+}
+
 /**
  * uvd_v4_2_mc_resume - memory controller programming
  *
@@ -756,6 +770,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 8513376..400c16f 100644 (file)
@@ -577,6 +577,20 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               6; /* uvd_v5_0_ring_emit_ib */
+}
+
+static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v5_0_ring_emit_hdp_flush */
+               2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+               14; /* uvd_v5_0_ring_emit_fence  x1 no user fence */
+}
+
 static bool uvd_v5_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -807,6 +821,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
index 2abe8a9..e0fd9f2 100644 (file)
@@ -725,6 +725,31 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0xE);
 }
 
+static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               8; /* uvd_v6_0_ring_emit_ib */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v6_0_ring_emit_hdp_flush */
+               2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+               10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+               14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+       return
+               2 + /* uvd_v6_0_ring_emit_hdp_flush */
+               2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+               10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+               20 + /* uvd_v6_0_ring_emit_vm_flush */
+               14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
+}
+
 static bool uvd_v6_0_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1037,6 +1062,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
 };
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1056,6 +1083,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
+       .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
index 5fa55b5..76e64ad 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_vce.h"
 #include "cikd.h"
-
 #include "vce/vce_2_0_d.h"
 #include "vce/vce_2_0_sh_mask.h"
-
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
@@ -193,6 +193,8 @@ static int vce_v2_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vce.num_rings = 2;
+
        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);
 
@@ -202,7 +204,7 @@ static int vce_v2_0_early_init(void *handle)
 static int vce_v2_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* VCE */
@@ -219,19 +221,14 @@ static int vce_v2_0_sw_init(void *handle)
        if (r)
                return r;
 
-       ring = &adev->vce.ring[0];
-       sprintf(ring->name, "vce0");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
-
-       ring = &adev->vce.ring[1];
-       sprintf(ring->name, "vce1");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
+       for (i = 0; i < adev->vce.num_rings; i++) {
+               ring = &adev->vce.ring[i];
+               sprintf(ring->name, "vce%d", i);
+               r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+                                    &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -254,29 +251,23 @@ static int vce_v2_0_sw_fini(void *handle)
 
 static int vce_v2_0_hw_init(void *handle)
 {
-       struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        r = vce_v2_0_start(adev);
+       /* this error means the vcpu is not in a running state; just skip the ring test rather than failing driver init */
        if (r)
-/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
                return 0;
 
-       ring = &adev->vce.ring[0];
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
-               return r;
-       }
+       for (i = 0; i < adev->vce.num_rings; i++)
+               adev->vce.ring[i].ready = false;
 
-       ring = &adev->vce.ring[1];
-       ring->ready = true;
-       r = amdgpu_ring_test_ring(ring);
-       if (r) {
-               ring->ready = false;
-               return r;
+       for (i = 0; i < adev->vce.num_rings; i++) {
+               r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+               if (r)
+                       return r;
+               else
+                       adev->vce.ring[i].ready = true;
        }
 
        DRM_INFO("VCE initialized successfully.\n");
@@ -548,11 +539,27 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
+static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
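+       /* set or clear the ECLK bypass bit in the GCK DFS control register */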
+       u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+       if (enable)
+               tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+       else
+               tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+       WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
 static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+
+       vce_v2_0_set_bypass_mode(adev, enable);
 
        if (state == AMD_CG_STATE_GATE)
                gate = true;
@@ -614,12 +622,16 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
+       .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
+       .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
-       adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->vce.num_rings; i++)
+               adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
index 615b8b1..a6b4e27 100644 (file)
@@ -70,8 +70,10 @@ static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 
        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
-       else
+       else if (ring == &adev->vce.ring[1])
                return RREG32(mmVCE_RB_RPTR2);
+       else
+               return RREG32(mmVCE_RB_RPTR3);
 }
 
 /**
@@ -87,8 +89,10 @@ static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 
        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
-       else
+       else if (ring == &adev->vce.ring[1])
                return RREG32(mmVCE_RB_WPTR2);
+       else
+               return RREG32(mmVCE_RB_WPTR3);
 }
 
 /**
@@ -104,8 +108,10 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, ring->wptr);
-       else
+       else if (ring == &adev->vce.ring[1])
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
+       else
+               WREG32(mmVCE_RB_WPTR3, ring->wptr);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -229,6 +235,13 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
+       ring = &adev->vce.ring[2];
+       WREG32(mmVCE_RB_RPTR3, ring->wptr);
+       WREG32(mmVCE_RB_WPTR3, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
                if (adev->vce.harvest_config & (1 << idx))
@@ -345,6 +358,8 @@ static int vce_v3_0_early_init(void *handle)
            (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
                return -ENOENT;
 
+       adev->vce.num_rings = 3;
+
        vce_v3_0_set_ring_funcs(adev);
        vce_v3_0_set_irq_funcs(adev);
 
@@ -355,7 +370,7 @@ static int vce_v3_0_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
 
        /* VCE */
        r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
@@ -371,19 +386,14 @@ static int vce_v3_0_sw_init(void *handle)
        if (r)
                return r;
 
-       ring = &adev->vce.ring[0];
-       sprintf(ring->name, "vce0");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
-
-       ring = &adev->vce.ring[1];
-       sprintf(ring->name, "vce1");
-       r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                            &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
-       if (r)
-               return r;
+       for (i = 0; i < adev->vce.num_rings; i++) {
+               ring = &adev->vce.ring[i];
+               sprintf(ring->name, "vce%d", i);
+               r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+                                    &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -413,10 +423,10 @@ static int vce_v3_0_hw_init(void *handle)
        if (r)
                return r;
 
-       adev->vce.ring[0].ready = false;
-       adev->vce.ring[1].ready = false;
+       for (i = 0; i < adev->vce.num_rings; i++)
+               adev->vce.ring[i].ready = false;
 
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < adev->vce.num_rings; i++) {
                r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
                if (r)
                        return r;
@@ -674,6 +684,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
        switch (entry->src_data) {
        case 0:
        case 1:
+       case 2:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
                break;
        default:
@@ -685,7 +696,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
-static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
 {
        u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
 
@@ -704,8 +715,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
        int i;
 
-       if (adev->asic_type == CHIP_POLARIS10)
-               vce_v3_set_bypass_mode(adev, enable);
+       if ((adev->asic_type == CHIP_POLARIS10) ||
+           (adev->asic_type == CHIP_TONGA))
+               vce_v3_0_set_bypass_mode(adev, enable);
 
        if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
                return 0;
@@ -763,6 +775,61 @@ static int vce_v3_0_set_powergating_state(void *handle,
                return vce_v3_0_start(adev);
 }
 
+static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+               struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+       amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, ib->length_dw);
+}
+
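+/* update the VM page table base and flush the VM TLB via VCE commands */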
+static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
+                        unsigned int vm_id, uint64_t pd_addr)
+{
+       amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, pd_addr >> 12);
+
+       amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
+       amdgpu_ring_write(ring, vm_id);
+       amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+       uint32_t seq = ring->fence_drv.sync_seq;
+       uint64_t addr = ring->fence_drv.gpu_addr;
+
+       amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+       amdgpu_ring_write(ring, seq);
+}
+
+static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+       return
+               5; /* vce_v3_0_ring_emit_ib */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+       return
+               4 + /* vce_v3_0_emit_pipeline_sync */
+               6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+       return
+               6 + /* vce_v3_0_emit_vm_flush */
+               4 + /* vce_v3_0_emit_pipeline_sync */
+               6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
+}
+
 const struct amd_ip_funcs vce_v3_0_ip_funcs = {
        .name = "vce_v3_0",
        .early_init = vce_v3_0_early_init,
@@ -783,7 +849,7 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
        .set_powergating_state = vce_v3_0_set_powergating_state,
 };
 
-static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
+static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
        .get_rptr = vce_v3_0_ring_get_rptr,
        .get_wptr = vce_v3_0_ring_get_wptr,
        .set_wptr = vce_v3_0_ring_set_wptr,
@@ -796,12 +862,42 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
+       .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
+};
+
+static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+       .get_rptr = vce_v3_0_ring_get_rptr,
+       .get_wptr = vce_v3_0_ring_get_wptr,
+       .set_wptr = vce_v3_0_ring_set_wptr,
+       .parse_cs = NULL,
+       .emit_ib = vce_v3_0_ring_emit_ib,
+       .emit_vm_flush = vce_v3_0_emit_vm_flush,
+       .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+       .emit_fence = amdgpu_vce_ring_emit_fence,
+       .test_ring = amdgpu_vce_ring_test_ring,
+       .test_ib = amdgpu_vce_ring_test_ib,
+       .insert_nop = amdgpu_ring_insert_nop,
+       .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_vce_ring_begin_use,
+       .end_use = amdgpu_vce_ring_end_use,
+       .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+       .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
-       adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
+       int i;
+
+       if (adev->asic_type >= CHIP_STONEY) {
+               for (i = 0; i < adev->vce.num_rings; i++)
+                       adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+               DRM_INFO("VCE enabled in VM mode\n");
+       } else {
+               for (i = 0; i < adev->vce.num_rings; i++)
+                       adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+               DRM_INFO("VCE enabled in physical mode\n");
+       }
 }
 
 static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
index f2e8aa1..b688e2f 100644 (file)
@@ -1650,7 +1650,7 @@ static int vi_common_early_init(void *handle)
                        AMD_PG_SUPPORT_GFX_PIPELINE |
                        AMD_PG_SUPPORT_UVD |
                        AMD_PG_SUPPORT_VCE;
-               adev->external_rev_id = adev->rev_id + 0x1;
+               adev->external_rev_id = adev->rev_id + 0x61;
                break;
        default:
                /* FIXME: not supported yet */
index 062ee16..f62b261 100644 (file)
 #define VCE_CMD_IB_AUTO        0x00000005
 #define VCE_CMD_SEMAPHORE      0x00000006
 
+#define VCE_CMD_IB_VM           0x00000102
+#define VCE_CMD_WAIT_GE         0x00000106
+#define VCE_CMD_UPDATE_PTB      0x00000107
+#define VCE_CMD_FLUSH_TLB       0x00000108
 #endif
index e621eba..ad494b8 100644 (file)
@@ -142,13 +142,14 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-       pr_debug("mapping doorbell page:\n");
-       pr_debug("     target user address == 0x%08llX\n",
-                       (unsigned long long) vma->vm_start);
-       pr_debug("     physical address    == 0x%08llX\n", address);
-       pr_debug("     vm_flags            == 0x%04lX\n", vma->vm_flags);
-       pr_debug("     size                == 0x%04lX\n",
-                        doorbell_process_allocation());
+       pr_debug("kfd: mapping doorbell page in %s\n"
+                "     target user address == 0x%08llX\n"
+                "     physical address    == 0x%08llX\n"
+                "     vm_flags            == 0x%04lX\n"
+                "     size                == 0x%04lX\n",
+                __func__,
+                (unsigned long long) vma->vm_start, address, vma->vm_flags,
+                doorbell_process_allocation());
 
        return io_remap_pfn_range(vma,
                                vma->vm_start,
index 9beae87..d135cd0 100644 (file)
@@ -47,6 +47,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
                        __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
 
+       memset(&prop, 0, sizeof(prop));
+       memset(&nop, 0, sizeof(nop));
+
        nop.opcode = IT_NOP;
        nop.type = PM4_TYPE_3;
        nop.u32all |= PM4_COUNT_ZERO;
@@ -121,7 +124,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        prop.eop_ring_buffer_address = kq->eop_gpu_addr;
        prop.eop_ring_buffer_size = PAGE_SIZE;
 
-       if (init_queue(&kq->queue, prop) != 0)
+       if (init_queue(&kq->queue, &prop) != 0)
                goto err_init_queue;
 
        kq->queue->device = dev;
index 80113c3..4750cab 100644 (file)
@@ -619,7 +619,7 @@ int kfd_init_apertures(struct kfd_process *process);
 /* Queue Context Management */
 struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
 
-int init_queue(struct queue **q, struct queue_properties properties);
+int init_queue(struct queue **q, const struct queue_properties *properties);
 void uninit_queue(struct queue *q);
 void print_queue_properties(struct queue_properties *q);
 void print_queue(struct queue *q);
index 4f3849a..ef7c8de 100644 (file)
@@ -404,58 +404,47 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 {
        struct kfd_process *p;
        struct kfd_process_device *pdd;
-       int idx, i;
 
        BUG_ON(dev == NULL);
 
-       idx = srcu_read_lock(&kfd_processes_srcu);
-
        /*
         * Look for the process that matches the pasid. If there is no such
         * process, we either released it in amdkfd's own notifier, or there
         * is a bug. Unfortunately, there is no way to tell...
         */
-       hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-               if (p->pasid == pasid) {
-
-                       srcu_read_unlock(&kfd_processes_srcu, idx);
-
-                       pr_debug("Unbinding process %d from IOMMU\n", pasid);
+       p = kfd_lookup_process_by_pasid(pasid);
+       if (!p)
+               return;
 
-                       mutex_lock(&p->mutex);
-
-                       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
-                               kfd_dbgmgr_destroy(dev->dbgmgr);
-
-                       pqm_uninit(&p->pqm);
+       pr_debug("Unbinding process %d from IOMMU\n", pasid);
 
-                       pdd = kfd_get_process_device_data(dev, p);
+       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+               kfd_dbgmgr_destroy(dev->dbgmgr);
 
-                       if (!pdd) {
-                               mutex_unlock(&p->mutex);
-                               return;
-                       }
+       pqm_uninit(&p->pqm);
 
-                       if (pdd->reset_wavefronts) {
-                               dbgdev_wave_reset_wavefronts(pdd->dev, p);
-                               pdd->reset_wavefronts = false;
-                       }
+       pdd = kfd_get_process_device_data(dev, p);
 
-                       /*
-                        * Just mark pdd as unbound, because we still need it
-                        * to call amd_iommu_unbind_pasid() in when the
-                        * process exits.
-                        * We don't call amd_iommu_unbind_pasid() here
-                        * because the IOMMU called us.
-                        */
-                       pdd->bound = false;
+       if (!pdd) {
+               mutex_unlock(&p->mutex);
+               return;
+       }
 
-                       mutex_unlock(&p->mutex);
+       if (pdd->reset_wavefronts) {
+               dbgdev_wave_reset_wavefronts(pdd->dev, p);
+               pdd->reset_wavefronts = false;
+       }
 
-                       return;
-               }
+       /*
+        * Just mark pdd as unbound, because we still need it
+        * to call amd_iommu_unbind_pasid() when the
+        * process exits.
+        * We don't call amd_iommu_unbind_pasid() here
+        * because the IOMMU called us.
+        */
+       pdd->bound = false;
 
-       srcu_read_unlock(&kfd_processes_srcu, idx);
+       mutex_unlock(&p->mutex);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
index 7b69070..e1fb40b 100644 (file)
@@ -129,7 +129,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
        q_properties->vmid = 0;
        q_properties->queue_id = qid;
 
-       retval = init_queue(q, *q_properties);
+       retval = init_queue(q, q_properties);
        if (retval != 0)
                goto err_init_queue;
 
index 9a0c90b..0ab1970 100644 (file)
@@ -63,7 +63,7 @@ void print_queue(struct queue *q)
        pr_debug("Queue Device Address: 0x%p\n", q->device);
 }
 
-int init_queue(struct queue **q, struct queue_properties properties)
+int init_queue(struct queue **q, const struct queue_properties *properties)
 {
        struct queue *tmp;
 
@@ -73,7 +73,7 @@ int init_queue(struct queue **q, struct queue_properties properties)
        if (!tmp)
                return -ENOMEM;
 
-       memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+       memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
 
        *q = tmp;
        return 0;
index 884c96f..1e50647 100644 (file)
@@ -1090,19 +1090,21 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 {
        uint32_t hashout;
        uint32_t buf[7];
+       uint64_t local_mem_size;
        int i;
 
        if (!gpu)
                return 0;
 
+       local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
+
        buf[0] = gpu->pdev->devfn;
        buf[1] = gpu->pdev->subsystem_vendor;
        buf[2] = gpu->pdev->subsystem_device;
        buf[3] = gpu->pdev->device;
        buf[4] = gpu->pdev->bus->number;
-       buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
-                       & 0xffffffff);
-       buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+       buf[5] = lower_32_bits(local_mem_size);
+       buf[6] = upper_32_bits(local_mem_size);
 
        for (i = 0, hashout = 0; i < 7; i++)
                hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
index db71041..c934b78 100644 (file)
  * Supported ASIC types
  */
 enum amd_asic_type {
-       CHIP_BONAIRE = 0,
+       CHIP_TAHITI = 0,
+       CHIP_PITCAIRN,
+       CHIP_VERDE,
+       CHIP_OLAND,
+       CHIP_HAINAN,
+       CHIP_BONAIRE,
        CHIP_KAVERI,
        CHIP_KABINI,
        CHIP_HAWAII,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
new file mode 100644 (file)
index 0000000..66e39cd
--- /dev/null
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 si_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const u32 si_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_WAVE_MGMT_1
+    0x00000000, // SPI_WAVE_MGMT_2
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 si_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const u32 si_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0, // HOLE
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const u32 si_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const u32 si_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const u32 si_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0, // HOLE
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
+{
+    {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+    {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
+    {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { NULL, 0, 0 }
+};
+static const struct cs_section_def si_cs_data[] = {
+    { si_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { NULL, SECT_NONE }
+};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
new file mode 100644 (file)
index 0000000..895c8e2
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK                      0x196c
+#define SI_DC_GPIO_HPD_A                         0x196d
+#define SI_DC_GPIO_HPD_EN                        0x196e
+#define SI_DC_GPIO_HPD_Y                         0x196f
+
+#define SI_GRPH_CONTROL                          0x1a01
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)           (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2                  0
+#       define SI_ADDR_SURF_P4_8x16             4
+#       define SI_ADDR_SURF_P4_16x16            5
+#       define SI_ADDR_SURF_P4_16x32            6
+#       define SI_ADDR_SURF_P4_32x32            7
+#       define SI_ADDR_SURF_P8_16x16_8x16       8
+#       define SI_ADDR_SURF_P8_16x32_8x16       9
+#       define SI_ADDR_SURF_P8_32x32_8x16       10
+#       define SI_ADDR_SURF_P8_16x32_16x16      11
+#       define SI_ADDR_SURF_P8_32x32_16x16      12
+#       define SI_ADDR_SURF_P8_32x32_16x32      13
+#       define SI_ADDR_SURF_P8_32x64_32x32      14
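+
+/* Illustrative composition (not part of the original header): a linear,
+ * aligned 32bpp ARGB8888 surface could be described as
+ *   SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
+ *   SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
+ *   SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_LINEAR_ALIGNED)
+ */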
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
new file mode 100644 (file)
index 0000000..8c5608a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
@@ -0,0 +1,2426 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+
+#define SI_MAX_SH_GPRS                 256
+#define SI_MAX_TEMP_GPRS               16
+#define SI_MAX_SH_THREADS              256
+#define SI_MAX_SH_STACK_ENTRIES        4096
+#define SI_MAX_FRC_EOV_CNT             16384
+#define SI_MAX_BACKENDS                8
+#define SI_MAX_BACKENDS_MASK           0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
+#define SI_MAX_SIMDS                   12
+#define SI_MAX_SIMDS_MASK              0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
+#define SI_MAX_PIPES                   8
+#define SI_MAX_PIPES_MASK              0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
+#define SI_MAX_LDS_NUM                 0xFFFF
+#define SI_MAX_TCC                     16
+#define SI_MAX_TCC_MASK                0xFFFF
+
+#define AMDGPU_NUM_OF_VMIDS            8
+
+/* SMC IND accessor regs */
+#define SMC_IND_INDEX_0                              0x80
+#define SMC_IND_DATA_0                               0x81
+
+#define SMC_IND_ACCESS_CNTL                          0x8A
+#       define AUTO_INCREMENT_IND_0                  (1 << 0)
+#define SMC_MESSAGE_0                                0x8B
+#define SMC_RESP_0                                   0x8C
+
+/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
+#define SMC_CG_IND_START                    0xc0030000
+#define SMC_CG_IND_END                      0xc0040000
+
+#define        CG_CGTT_LOCAL_0                         0x400
+#define        CG_CGTT_LOCAL_1                         0x401
+
+/* SMC IND registers */
+#define        SMC_SYSCON_RESET_CNTL                           0x80000000
+#       define RST_REG                                  (1 << 0)
+#define        SMC_SYSCON_CLOCK_CNTL_0                         0x80000004
+#       define CK_DISABLE                               (1 << 0)
+#       define CKEN                                     (1 << 24)
+
+#define VGA_HDP_CONTROL                                0xCA
+#define                VGA_MEMORY_DISABLE                              (1 << 4)
+
+#define DCCG_DISP_SLOW_SELECT_REG                       0x13F
+#define                DCCG_DISP1_SLOW_SELECT(x)               ((x) << 0)
+#define                DCCG_DISP1_SLOW_SELECT_MASK             (7 << 0)
+#define                DCCG_DISP1_SLOW_SELECT_SHIFT            0
+#define                DCCG_DISP2_SLOW_SELECT(x)               ((x) << 4)
+#define                DCCG_DISP2_SLOW_SELECT_MASK             (7 << 4)
+#define                DCCG_DISP2_SLOW_SELECT_SHIFT            4
+
+#define        CG_SPLL_FUNC_CNTL                               0x180
+#define                SPLL_RESET                              (1 << 0)
+#define                SPLL_SLEEP                              (1 << 1)
+#define                SPLL_BYPASS_EN                          (1 << 3)
+#define                SPLL_REF_DIV(x)                         ((x) << 4)
+#define                SPLL_REF_DIV_MASK                       (0x3f << 4)
+#define                SPLL_PDIV_A(x)                          ((x) << 20)
+#define                SPLL_PDIV_A_MASK                        (0x7f << 20)
+#define                SPLL_PDIV_A_SHIFT                       20
+#define        CG_SPLL_FUNC_CNTL_2                             0x181
+#define                SCLK_MUX_SEL(x)                         ((x) << 0)
+#define                SCLK_MUX_SEL_MASK                       (0x1ff << 0)
+#define                SPLL_CTLREQ_CHG                         (1 << 23)
+#define                SCLK_MUX_UPDATE                         (1 << 26)
+#define        CG_SPLL_FUNC_CNTL_3                             0x182
+#define                SPLL_FB_DIV(x)                          ((x) << 0)
+#define                SPLL_FB_DIV_MASK                        (0x3ffffff << 0)
+#define                SPLL_FB_DIV_SHIFT                       0
+#define                SPLL_DITHEN                             (1 << 28)
+#define        CG_SPLL_FUNC_CNTL_4                             0x183
+
+#define        SPLL_STATUS                                     0x185
+#define                SPLL_CHG_STATUS                         (1 << 1)
+#define        SPLL_CNTL_MODE                                  0x186
+#define                SPLL_SW_DIR_CONTROL                     (1 << 0)
+#      define SPLL_REFCLK_SEL(x)                       ((x) << 26)
+#      define SPLL_REFCLK_SEL_MASK                     (3 << 26)
+
+#define        CG_SPLL_SPREAD_SPECTRUM                         0x188
+#define                SSEN                                    (1 << 0)
+#define                CLK_S(x)                                ((x) << 4)
+#define                CLK_S_MASK                              (0xfff << 4)
+#define                CLK_S_SHIFT                             4
+#define        CG_SPLL_SPREAD_SPECTRUM_2                       0x189
+#define                CLK_V(x)                                ((x) << 0)
+#define                CLK_V_MASK                              (0x3ffffff << 0)
+#define                CLK_V_SHIFT                             0
+
+#define        CG_SPLL_AUTOSCALE_CNTL                          0x18b
+#       define AUTOSCALE_ON_SS_CLEAR                    (1 << 9)
+
+/* discrete uvd clocks */
+#define        CG_UPLL_FUNC_CNTL                               0x18d
+#      define UPLL_RESET_MASK                          0x00000001
+#      define UPLL_SLEEP_MASK                          0x00000002
+#      define UPLL_BYPASS_EN_MASK                      0x00000004
+#      define UPLL_CTLREQ_MASK                         0x00000008
+#      define UPLL_VCO_MODE_MASK                       0x00000600
+#      define UPLL_REF_DIV_MASK                        0x003F0000
+#      define UPLL_CTLACK_MASK                         0x40000000
+#      define UPLL_CTLACK2_MASK                        0x80000000
+#define        CG_UPLL_FUNC_CNTL_2                             0x18e
+#      define UPLL_PDIV_A(x)                           ((x) << 0)
+#      define UPLL_PDIV_A_MASK                         0x0000007F
+#      define UPLL_PDIV_B(x)                           ((x) << 8)
+#      define UPLL_PDIV_B_MASK                         0x00007F00
+#      define VCLK_SRC_SEL(x)                          ((x) << 20)
+#      define VCLK_SRC_SEL_MASK                        0x01F00000
+#      define DCLK_SRC_SEL(x)                          ((x) << 25)
+#      define DCLK_SRC_SEL_MASK                        0x3E000000
+#define        CG_UPLL_FUNC_CNTL_3                             0x18f
+#      define UPLL_FB_DIV(x)                           ((x) << 0)
+#      define UPLL_FB_DIV_MASK                         0x01FFFFFF
+#define        CG_UPLL_FUNC_CNTL_4                             0x191
+#      define UPLL_SPARE_ISPARE9                       0x00020000
+#define        CG_UPLL_FUNC_CNTL_5                             0x192
+#      define RESET_ANTI_MUX_MASK                      0x00000200
+#define        CG_UPLL_SPREAD_SPECTRUM                         0x194
+#      define SSEN_MASK                                0x00000001
+
+#define        MPLL_BYPASSCLK_SEL                              0x197
+#      define MPLL_CLKOUT_SEL(x)                       ((x) << 8)
+#      define MPLL_CLKOUT_SEL_MASK                     0xFF00
+
+#define CG_CLKPIN_CNTL                                    0x198
+#       define XTALIN_DIVIDE                              (1 << 1)
+#       define BCLK_AS_XCLK                               (1 << 2)
+#define CG_CLKPIN_CNTL_2                                  0x199
+#       define FORCE_BIF_REFCLK_EN                        (1 << 3)
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+
+#define        THM_CLK_CNTL                                    0x19b
+#      define CMON_CLK_SEL(x)                          ((x) << 0)
+#      define CMON_CLK_SEL_MASK                        0xFF
+#      define TMON_CLK_SEL(x)                          ((x) << 8)
+#      define TMON_CLK_SEL_MASK                        0xFF00
+#define        MISC_CLK_CNTL                                   0x19c
+#      define DEEP_SLEEP_CLK_SEL(x)                    ((x) << 0)
+#      define DEEP_SLEEP_CLK_SEL_MASK                  0xFF
+#      define ZCLK_SEL(x)                              ((x) << 8)
+#      define ZCLK_SEL_MASK                            0xFF00
+
+#define        CG_THERMAL_CTRL                                 0x1c0
+#define        DPM_EVENT_SRC(x)                        ((x) << 0)
+#define        DPM_EVENT_SRC_MASK                      (7 << 0)
+#define                DIG_THERM_DPM(x)                        ((x) << 14)
+#define                DIG_THERM_DPM_MASK                      0x003FC000
+#define                DIG_THERM_DPM_SHIFT                     14
+#define        CG_THERMAL_STATUS                               0x1c1
+#define                FDO_PWM_DUTY(x)                         ((x) << 9)
+#define                FDO_PWM_DUTY_MASK                       (0xff << 9)
+#define                FDO_PWM_DUTY_SHIFT                      9
+#define        CG_THERMAL_INT                                  0x1c2
+#define                DIG_THERM_INTH(x)                       ((x) << 8)
+#define                DIG_THERM_INTH_MASK                     0x0000FF00
+#define                DIG_THERM_INTH_SHIFT                    8
+#define                DIG_THERM_INTL(x)                       ((x) << 16)
+#define                DIG_THERM_INTL_MASK                     0x00FF0000
+#define                DIG_THERM_INTL_SHIFT                    16
+#define        THERM_INT_MASK_HIGH                     (1 << 24)
+#define        THERM_INT_MASK_LOW                      (1 << 25)
+
+#define        CG_MULT_THERMAL_CTRL                                    0x1c4
+#define                TEMP_SEL(x)                                     ((x) << 20)
+#define                TEMP_SEL_MASK                                   (0xff << 20)
+#define                TEMP_SEL_SHIFT                                  20
+#define        CG_MULT_THERMAL_STATUS                                  0x1c5
+#define                ASIC_MAX_TEMP(x)                                ((x) << 0)
+#define                ASIC_MAX_TEMP_MASK                              0x000001ff
+#define                ASIC_MAX_TEMP_SHIFT                             0
+#define                CTF_TEMP(x)                                     ((x) << 9)
+#define                CTF_TEMP_MASK                                   0x0003fe00
+#define                CTF_TEMP_SHIFT                                  9
+
+#define        CG_FDO_CTRL0                                    0x1d5
+#define                FDO_STATIC_DUTY(x)                      ((x) << 0)
+#define                FDO_STATIC_DUTY_MASK                    0x000000FF
+#define                FDO_STATIC_DUTY_SHIFT                   0
+#define        CG_FDO_CTRL1                                    0x1d6
+#define                FMAX_DUTY100(x)                         ((x) << 0)
+#define                FMAX_DUTY100_MASK                       0x000000FF
+#define                FMAX_DUTY100_SHIFT                      0
+#define        CG_FDO_CTRL2                                    0x1d7
+#define                TMIN(x)                                 ((x) << 0)
+#define                TMIN_MASK                               0x000000FF
+#define                TMIN_SHIFT                              0
+#define                FDO_PWM_MODE(x)                         ((x) << 11)
+#define                FDO_PWM_MODE_MASK                       (7 << 11)
+#define                FDO_PWM_MODE_SHIFT                      11
+#define                TACH_PWM_RESP_RATE(x)                   ((x) << 25)
+#define                TACH_PWM_RESP_RATE_MASK                 (0x7f << 25)
+#define                TACH_PWM_RESP_RATE_SHIFT                25
+
+#define CG_TACH_CTRL                                    0x1dc
+#       define EDGE_PER_REV(x)                          ((x) << 0)
+#       define EDGE_PER_REV_MASK                        (0x7 << 0)
+#       define EDGE_PER_REV_SHIFT                       0
+#       define TARGET_PERIOD(x)                         ((x) << 3)
+#       define TARGET_PERIOD_MASK                       0xfffffff8
+#       define TARGET_PERIOD_SHIFT                      3
+#define CG_TACH_STATUS                                  0x1dd
+#       define TACH_PERIOD(x)                           ((x) << 0)
+#       define TACH_PERIOD_MASK                         0xffffffff
+#       define TACH_PERIOD_SHIFT                        0
+
+#define GENERAL_PWRMGT                                  0x1e0
+#       define GLOBAL_PWRMGT_EN                         (1 << 0)
+#       define STATIC_PM_EN                             (1 << 1)
+#       define THERMAL_PROTECTION_DIS                   (1 << 2)
+#       define THERMAL_PROTECTION_TYPE                  (1 << 3)
+#       define SW_SMIO_INDEX(x)                         ((x) << 6)
+#       define SW_SMIO_INDEX_MASK                       (1 << 6)
+#       define SW_SMIO_INDEX_SHIFT                      6
+#       define VOLT_PWRMGT_EN                           (1 << 10)
+#       define DYN_SPREAD_SPECTRUM_EN                   (1 << 23)
+#define CG_TPC                                            0x1e1
+#define SCLK_PWRMGT_CNTL                                  0x1e2
+#       define SCLK_PWRMGT_OFF                            (1 << 0)
+#       define SCLK_LOW_D1                                (1 << 1)
+#       define FIR_RESET                                  (1 << 4)
+#       define FIR_FORCE_TREND_SEL                        (1 << 5)
+#       define FIR_TREND_MODE                             (1 << 6)
+#       define DYN_GFX_CLK_OFF_EN                         (1 << 7)
+#       define GFX_CLK_FORCE_ON                           (1 << 8)
+#       define GFX_CLK_REQUEST_OFF                        (1 << 9)
+#       define GFX_CLK_FORCE_OFF                          (1 << 10)
+#       define GFX_CLK_OFF_ACPI_D1                        (1 << 11)
+#       define GFX_CLK_OFF_ACPI_D2                        (1 << 12)
+#       define GFX_CLK_OFF_ACPI_D3                        (1 << 13)
+#       define DYN_LIGHT_SLEEP_EN                         (1 << 14)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX                  0x1e6
+#       define CURRENT_STATE_INDEX_MASK                   (0xf << 4)
+#       define CURRENT_STATE_INDEX_SHIFT                  4
+
+#define CG_FTV                                            0x1ef
+
+#define CG_FFCT_0                                         0x1f0
+#       define UTC_0(x)                                   ((x) << 0)
+#       define UTC_0_MASK                                 (0x3ff << 0)
+#       define DTC_0(x)                                   ((x) << 10)
+#       define DTC_0_MASK                                 (0x3ff << 10)
+
+#define CG_BSP                                          0x1ff
+#       define BSP(x)                                  ((x) << 0)
+#       define BSP_MASK                                        (0xffff << 0)
+#       define BSU(x)                                  ((x) << 16)
+#       define BSU_MASK                                        (0xf << 16)
+#define CG_AT                                           0x200
+#       define CG_R(x)                                 ((x) << 0)
+#       define CG_R_MASK                               (0xffff << 0)
+#       define CG_L(x)                                 ((x) << 16)
+#       define CG_L_MASK                               (0xffff << 16)
+
+#define CG_GIT                                          0x201
+#       define CG_GICST(x)                              ((x) << 0)
+#       define CG_GICST_MASK                            (0xffff << 0)
+#       define CG_GIPOT(x)                              ((x) << 16)
+#       define CG_GIPOT_MASK                            (0xffff << 16)
+
+#define CG_SSP                                            0x203
+#       define SST(x)                                     ((x) << 0)
+#       define SST_MASK                                   (0xffff << 0)
+#       define SSTU(x)                                    ((x) << 16)
+#       define SSTU_MASK                                  (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL                               0x20a
+#       define DISP1_GAP(x)                               ((x) << 0)
+#       define DISP1_GAP_MASK                             (3 << 0)
+#       define DISP2_GAP(x)                               ((x) << 2)
+#       define DISP2_GAP_MASK                             (3 << 2)
+#       define VBI_TIMER_COUNT(x)                         ((x) << 4)
+#       define VBI_TIMER_COUNT_MASK                       (0x3fff << 4)
+#       define VBI_TIMER_UNIT(x)                          ((x) << 20)
+#       define VBI_TIMER_UNIT_MASK                        (7 << 20)
+#       define DISP1_GAP_MCHG(x)                          ((x) << 24)
+#       define DISP1_GAP_MCHG_MASK                        (3 << 24)
+#       define DISP2_GAP_MCHG(x)                          ((x) << 26)
+#       define DISP2_GAP_MCHG_MASK                        (3 << 26)
+
+#define        CG_ULV_CONTROL                                  0x21e
+#define        CG_ULV_PARAMETER                                0x21f
+
+#define        SMC_SCRATCH0                                    0x221
+
+#define        CG_CAC_CTRL                                     0x22e
+#      define CAC_WINDOW(x)                            ((x) << 0)
+#      define CAC_WINDOW_MASK                          0x00ffffff
+
+#define DMIF_ADDR_CONFIG                               0x2F5
+
+#define DMIF_ADDR_CALC                                 0x300
+
+#define        PIPE0_DMIF_BUFFER_CONTROL                         0x0328
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define        SRBM_STATUS                                     0x394
+#define                GRBM_RQ_PENDING                         (1 << 5)
+#define                VMC_BUSY                                (1 << 8)
+#define                MCB_BUSY                                (1 << 9)
+#define                MCB_NON_DISPLAY_BUSY                    (1 << 10)
+#define                MCC_BUSY                                (1 << 11)
+#define                MCD_BUSY                                (1 << 12)
+#define                SEM_BUSY                                (1 << 14)
+#define                IH_BUSY                                 (1 << 17)
+
+#define        SRBM_SOFT_RESET                                 0x398
+#define                SOFT_RESET_BIF                          (1 << 1)
+#define                SOFT_RESET_DC                           (1 << 5)
+#define                SOFT_RESET_DMA1                         (1 << 6)
+#define                SOFT_RESET_GRBM                         (1 << 8)
+#define                SOFT_RESET_HDP                          (1 << 9)
+#define                SOFT_RESET_IH                           (1 << 10)
+#define                SOFT_RESET_MC                           (1 << 11)
+#define                SOFT_RESET_ROM                          (1 << 14)
+#define                SOFT_RESET_SEM                          (1 << 15)
+#define                SOFT_RESET_VMC                          (1 << 17)
+#define                SOFT_RESET_DMA                          (1 << 20)
+#define                SOFT_RESET_TST                          (1 << 21)
+#define                SOFT_RESET_REGBB                        (1 << 22)
+#define                SOFT_RESET_ORB                          (1 << 23)
+
+#define        CC_SYS_RB_BACKEND_DISABLE                       0x3A0
+#define        GC_USER_SYS_RB_BACKEND_DISABLE                  0x3A1
+
+#define SRBM_READ_ERROR                                        0x3A6
+#define SRBM_INT_CNTL                                  0x3A8
+#define SRBM_INT_ACK                                   0x3AA
+
+#define        SRBM_STATUS2                                    0x3B1
+#define                DMA_BUSY                                (1 << 5)
+#define                DMA1_BUSY                               (1 << 6)
+
+#define VM_L2_CNTL                                     0x500
+#define                ENABLE_L2_CACHE                                 (1 << 0)
+#define                ENABLE_L2_FRAGMENT_PROCESSING                   (1 << 1)
+#define                L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)                ((x) << 2)
+#define                L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)                ((x) << 4)
+#define                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE         (1 << 9)
+#define                ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE        (1 << 10)
+#define                EFFECTIVE_L2_QUEUE_SIZE(x)                      (((x) & 7) << 15)
+#define                CONTEXT1_IDENTITY_ACCESS_MODE(x)                (((x) & 3) << 19)
+#define VM_L2_CNTL2                                    0x501
+#define                INVALIDATE_ALL_L1_TLBS                          (1 << 0)
+#define                INVALIDATE_L2_CACHE                             (1 << 1)
+#define                INVALIDATE_CACHE_MODE(x)                        ((x) << 26)
+#define                        INVALIDATE_PTE_AND_PDE_CACHES           0
+#define                        INVALIDATE_ONLY_PTE_CACHES              1
+#define                        INVALIDATE_ONLY_PDE_CACHES              2
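+/* Illustrative use (assumption, not from the original file): a full TLB
+ * and page-table cache invalidate could be requested with
+ *   WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE |
+ *          INVALIDATE_CACHE_MODE(INVALIDATE_PTE_AND_PDE_CACHES));
+ * where WREG32() is the driver's MMIO write helper.
+ */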
+#define VM_L2_CNTL3                                    0x502
+#define                BANK_SELECT(x)                                  ((x) << 0)
+#define                L2_CACHE_UPDATE_MODE(x)                         ((x) << 6)
+#define                L2_CACHE_BIGK_FRAGMENT_SIZE(x)                  ((x) << 15)
+#define                L2_CACHE_BIGK_ASSOCIATIVITY                     (1 << 20)
+#define        VM_L2_STATUS                                    0x503
+#define                L2_BUSY                                         (1 << 0)
+#define VM_CONTEXT0_CNTL                               0x504
+#define                ENABLE_CONTEXT                                  (1 << 0)
+#define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
+#define                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 3)
+#define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT    (1 << 6)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT      (1 << 7)
+#define                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 9)
+#define                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 10)
+#define                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 12)
+#define                VALID_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 13)
+#define                READ_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 15)
+#define                READ_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 16)
+#define                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 18)
+#define                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 19)
+#define                PAGE_TABLE_BLOCK_SIZE(x)                        (((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL                               0x505
+#define VM_CONTEXT0_CNTL2                              0x50C
+#define VM_CONTEXT1_CNTL2                              0x50D
+#define        VM_CONTEXT8_PAGE_TABLE_BASE_ADDR                0x50E
+#define        VM_CONTEXT9_PAGE_TABLE_BASE_ADDR                0x50F
+#define        VM_CONTEXT10_PAGE_TABLE_BASE_ADDR               0x510
+#define        VM_CONTEXT11_PAGE_TABLE_BASE_ADDR               0x511
+#define        VM_CONTEXT12_PAGE_TABLE_BASE_ADDR               0x512
+#define        VM_CONTEXT13_PAGE_TABLE_BASE_ADDR               0x513
+#define        VM_CONTEXT14_PAGE_TABLE_BASE_ADDR               0x514
+#define        VM_CONTEXT15_PAGE_TABLE_BASE_ADDR               0x515
+
+#define        VM_CONTEXT1_PROTECTION_FAULT_ADDR               0x53f
+#define        VM_CONTEXT1_PROTECTION_FAULT_STATUS             0x537
+#define                PROTECTIONS_MASK                        (0xf << 0)
+#define                PROTECTIONS_SHIFT                       0
+               /* bit 0: range
+                * bit 1: pde0
+                * bit 2: valid
+                * bit 3: read
+                * bit 4: write
+                */
+#define                MEMORY_CLIENT_ID_MASK                   (0xff << 12)
+#define                MEMORY_CLIENT_ID_SHIFT                  12
+#define                MEMORY_CLIENT_RW_MASK                   (1 << 24)
+#define                MEMORY_CLIENT_RW_SHIFT                  24
+#define                FAULT_VMID_MASK                         (0xf << 25)
+#define                FAULT_VMID_SHIFT                        25
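+/* Illustrative decode (assumption), given a raw fault 'status' value:
+ *   prot   = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
+ *   client = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
+ *   rw     = (status & MEMORY_CLIENT_RW_MASK) >> MEMORY_CLIENT_RW_SHIFT;
+ *   vmid   = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
+ */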
+
+#define VM_INVALIDATE_REQUEST                          0x51E
+#define VM_INVALIDATE_RESPONSE                         0x51F
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR      0x546
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR      0x547
+
+#define        VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                0x54F
+#define        VM_CONTEXT1_PAGE_TABLE_BASE_ADDR                0x550
+#define        VM_CONTEXT2_PAGE_TABLE_BASE_ADDR                0x551
+#define        VM_CONTEXT3_PAGE_TABLE_BASE_ADDR                0x552
+#define        VM_CONTEXT4_PAGE_TABLE_BASE_ADDR                0x553
+#define        VM_CONTEXT5_PAGE_TABLE_BASE_ADDR                0x554
+#define        VM_CONTEXT6_PAGE_TABLE_BASE_ADDR                0x555
+#define        VM_CONTEXT7_PAGE_TABLE_BASE_ADDR                0x556
+#define        VM_CONTEXT0_PAGE_TABLE_START_ADDR               0x557
+#define        VM_CONTEXT1_PAGE_TABLE_START_ADDR               0x558
+
+#define        VM_CONTEXT0_PAGE_TABLE_END_ADDR                 0x55F
+#define        VM_CONTEXT1_PAGE_TABLE_END_ADDR                 0x560
+
+#define VM_L2_CG                                       0x570
+#define                MC_CG_ENABLE                            (1 << 18)
+#define                MC_LS_ENABLE                            (1 << 19)
+
+#define MC_SHARED_CHMAP                                                0x801
+#define                NOOFCHAN_SHIFT                                  12
+#define                NOOFCHAN_MASK                                   0x0000f000
+#define MC_SHARED_CHREMAP                                      0x802
+
+#define        MC_VM_FB_LOCATION                               0x809
+#define        MC_VM_AGP_TOP                                   0x80A
+#define        MC_VM_AGP_BOT                                   0x80B
+#define        MC_VM_AGP_BASE                                  0x80C
+#define        MC_VM_SYSTEM_APERTURE_LOW_ADDR                  0x80D
+#define        MC_VM_SYSTEM_APERTURE_HIGH_ADDR                 0x80E
+#define        MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR              0x80F
+
+#define        MC_VM_MX_L1_TLB_CNTL                            0x819
+#define                ENABLE_L1_TLB                                   (1 << 0)
+#define                ENABLE_L1_FRAGMENT_PROCESSING                   (1 << 1)
+#define                SYSTEM_ACCESS_MODE_PA_ONLY                      (0 << 3)
+#define                SYSTEM_ACCESS_MODE_USE_SYS_MAP                  (1 << 3)
+#define                SYSTEM_ACCESS_MODE_IN_SYS                       (2 << 3)
+#define                SYSTEM_ACCESS_MODE_NOT_IN_SYS                   (3 << 3)
+#define                SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU       (0 << 5)
+#define                ENABLE_ADVANCED_DRIVER_MODEL                    (1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL                        0x82B
+
+#define MC_HUB_MISC_HUB_CG                             0x82E
+#define MC_HUB_MISC_VM_CG                              0x82F
+
+#define MC_HUB_MISC_SIP_CG                             0x830
+
+#define MC_XPB_CLK_GAT                                 0x91E
+
+#define MC_CITF_MISC_RD_CG                             0x992
+#define MC_CITF_MISC_WR_CG                             0x993
+#define MC_CITF_MISC_VM_CG                             0x994
+
+#define        MC_ARB_RAMCFG                                   0x9D8
+#define                NOOFBANK_SHIFT                                  0
+#define                NOOFBANK_MASK                                   0x00000003
+#define                NOOFRANK_SHIFT                                  2
+#define                NOOFRANK_MASK                                   0x00000004
+#define                NOOFROWS_SHIFT                                  3
+#define                NOOFROWS_MASK                                   0x00000038
+#define                NOOFCOLS_SHIFT                                  6
+#define                NOOFCOLS_MASK                                   0x000000C0
+#define                CHANSIZE_SHIFT                                  8
+#define                CHANSIZE_MASK                                   0x00000100
+#define                CHANSIZE_OVERRIDE                               (1 << 11)
+#define                NOOFGROUPS_SHIFT                                12
+#define                NOOFGROUPS_MASK                                 0x00001000
+
+#define        MC_ARB_DRAM_TIMING                              0x9DD
+#define        MC_ARB_DRAM_TIMING2                             0x9DE
+
+#define MC_ARB_BURST_TIME                               0xA02
+#define                STATE0(x)                               ((x) << 0)
+#define                STATE0_MASK                             (0x1f << 0)
+#define                STATE0_SHIFT                            0
+#define                STATE1(x)                               ((x) << 5)
+#define                STATE1_MASK                             (0x1f << 5)
+#define                STATE1_SHIFT                            5
+#define                STATE2(x)                               ((x) << 10)
+#define                STATE2_MASK                             (0x1f << 10)
+#define                STATE2_SHIFT                            10
+#define                STATE3(x)                               ((x) << 15)
+#define                STATE3_MASK                             (0x1f << 15)
+#define                STATE3_SHIFT                            15
+
+#define        MC_SEQ_TRAIN_WAKEUP_CNTL                        0xA3A
+#define                TRAIN_DONE_D0                           (1 << 30)
+#define                TRAIN_DONE_D1                           (1 << 31)
+
+#define MC_SEQ_SUP_CNTL                                0xA32
+#define                RUN_MASK                                (1 << 0)
+#define MC_SEQ_SUP_PGM                                 0xA33
+#define MC_PMG_AUTO_CMD                                0xA34
+
+#define MC_IO_PAD_CNTL_D0                              0xA74
+#define                MEM_FALL_OUT_CMD                        (1 << 8)
+
+#define MC_SEQ_RAS_TIMING                               0xA28
+#define MC_SEQ_CAS_TIMING                               0xA29
+#define MC_SEQ_MISC_TIMING                              0xA2A
+#define MC_SEQ_MISC_TIMING2                             0xA2B
+#define MC_SEQ_PMG_TIMING                               0xA2C
+#define MC_SEQ_RD_CTL_D0                                0xA2D
+#define MC_SEQ_RD_CTL_D1                                0xA2E
+#define MC_SEQ_WR_CTL_D0                                0xA2F
+#define MC_SEQ_WR_CTL_D1                                0xA30
+
+#define MC_SEQ_MISC0                                           0xA80
+#define        MC_SEQ_MISC0_VEN_ID_SHIFT               8
+#define        MC_SEQ_MISC0_VEN_ID_MASK                0x00000f00
+#define        MC_SEQ_MISC0_VEN_ID_VALUE               3
+#define        MC_SEQ_MISC0_REV_ID_SHIFT               12
+#define        MC_SEQ_MISC0_REV_ID_MASK                0x0000f000
+#define        MC_SEQ_MISC0_REV_ID_VALUE               1
+#define        MC_SEQ_MISC0_GDDR5_SHIFT                28
+#define        MC_SEQ_MISC0_GDDR5_MASK                 0xf0000000
+#define        MC_SEQ_MISC0_GDDR5_VALUE                5
+#define MC_SEQ_MISC1                                    0xA81
+#define MC_SEQ_RESERVE_M                                0xA82
+#define MC_PMG_CMD_EMRS                                 0xA83
+
+#define MC_SEQ_IO_DEBUG_INDEX                          0xA91
+#define MC_SEQ_IO_DEBUG_DATA                                   0xA92
+
+#define MC_SEQ_MISC5                                    0xA95
+#define MC_SEQ_MISC6                                    0xA96
+
+#define MC_SEQ_MISC7                                    0xA99
+
+#define MC_SEQ_RAS_TIMING_LP                            0xA9B
+#define MC_SEQ_CAS_TIMING_LP                            0xA9C
+#define MC_SEQ_MISC_TIMING_LP                           0xA9D
+#define MC_SEQ_MISC_TIMING2_LP                          0xA9E
+#define MC_SEQ_WR_CTL_D0_LP                             0xA9F
+#define MC_SEQ_WR_CTL_D1_LP                             0xAA0
+#define MC_SEQ_PMG_CMD_EMRS_LP                          0xAA1
+#define MC_SEQ_PMG_CMD_MRS_LP                           0xAA2
+
+#define MC_PMG_CMD_MRS                                  0xAAB
+
+#define MC_SEQ_RD_CTL_D0_LP                             0xAC7
+#define MC_SEQ_RD_CTL_D1_LP                             0xAC8
+
+#define MC_PMG_CMD_MRS1                                 0xAD1
+#define MC_SEQ_PMG_CMD_MRS1_LP                          0xAD2
+#define MC_SEQ_PMG_TIMING_LP                            0xAD3
+
+#define MC_SEQ_WR_CTL_2                                 0xAD5
+#define MC_SEQ_WR_CTL_2_LP                              0xAD6
+#define MC_PMG_CMD_MRS2                                 0xAD7
+#define MC_SEQ_PMG_CMD_MRS2_LP                          0xAD8
+
+#define        MCLK_PWRMGT_CNTL                                0xAE8
+#       define DLL_SPEED(x)                            ((x) << 0)
+#       define DLL_SPEED_MASK                          (0x1f << 0)
+#       define DLL_READY                                (1 << 6)
+#       define MC_INT_CNTL                              (1 << 7)
+#       define MRDCK0_PDNB                              (1 << 8)
+#       define MRDCK1_PDNB                              (1 << 9)
+#       define MRDCK0_RESET                             (1 << 16)
+#       define MRDCK1_RESET                             (1 << 17)
+#       define DLL_READY_READ                           (1 << 24)
+#define        DLL_CNTL                                        0xAE9
+#       define MRDCK0_BYPASS                            (1 << 24)
+#       define MRDCK1_BYPASS                            (1 << 25)
+
+#define        MPLL_CNTL_MODE                                  0xAEC
+#       define MPLL_MCLK_SEL                            (1 << 11)
+#define        MPLL_FUNC_CNTL                                  0xAED
+#define                BWCTRL(x)                               ((x) << 20)
+#define                BWCTRL_MASK                             (0xff << 20)
+#define        MPLL_FUNC_CNTL_1                                0xAEE
+#define                VCO_MODE(x)                             ((x) << 0)
+#define                VCO_MODE_MASK                           (3 << 0)
+#define                CLKFRAC(x)                              ((x) << 4)
+#define                CLKFRAC_MASK                            (0xfff << 4)
+#define                CLKF(x)                                 ((x) << 16)
+#define                CLKF_MASK                               (0xfff << 16)
+#define        MPLL_FUNC_CNTL_2                                0xAEF
+#define        MPLL_AD_FUNC_CNTL                               0xAF0
+#define                YCLK_POST_DIV(x)                        ((x) << 0)
+#define                YCLK_POST_DIV_MASK                      (7 << 0)
+#define        MPLL_DQ_FUNC_CNTL                               0xAF1
+#define                YCLK_SEL(x)                             ((x) << 4)
+#define                YCLK_SEL_MASK                           (1 << 4)
+
+#define        MPLL_SS1                                        0xAF3
+#define                CLKV(x)                                 ((x) << 0)
+#define                CLKV_MASK                               (0x3ffffff << 0)
+#define        MPLL_SS2                                        0xAF4
+#define                CLKS(x)                                 ((x) << 0)
+#define                CLKS_MASK                               (0xfff << 0)
+
+#define        HDP_HOST_PATH_CNTL                              0xB00
+#define        CLOCK_GATING_DIS                        (1 << 23)
+#define        HDP_NONSURFACE_BASE                             0xB01
+#define        HDP_NONSURFACE_INFO                             0xB02
+#define        HDP_NONSURFACE_SIZE                             0xB03
+
+#define HDP_DEBUG0                                     0xBCC
+
+#define HDP_ADDR_CONFIG                                0xBD2
+#define HDP_MISC_CNTL                                  0xBD3
+#define        HDP_FLUSH_INVALIDATE_CACHE                      (1 << 0)
+#define HDP_MEM_POWER_LS                               0xBD4
+#define        HDP_LS_ENABLE                           (1 << 0)
+
+#define ATC_MISC_CG                                    0xCD4
+
+#define IH_RB_CNTL                                        0xF80
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0xF81
+#define IH_RB_RPTR                                        0xF82
+#define IH_RB_WPTR                                        0xF83
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0xF84
+#define IH_RB_WPTR_ADDR_LO                                0xF85
+#define IH_CNTL                                           0xF86
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
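+/* Illustrative use (assumption, not from the original file): interrupt
+ * delivery might be enabled with
+ *   WREG32(IH_CNTL, ENABLE_INTR | IH_MC_SWAP(IH_MC_SWAP_NONE) | RPTR_REARM);
+ */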
+
+#define        CONFIG_MEMSIZE                                  0x150A
+
+#define INTERRUPT_CNTL                                    0x151A
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x151B
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL                   0x1520
+
+#define        BIF_FB_EN                                               0x1524
+#define                FB_READ_EN                                      (1 << 0)
+#define                FB_WRITE_EN                                     (1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL                   0x1528
+
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX                       0x1780
+#       define AZ_ENDPOINT_REG_INDEX(x)                  (((x) & 0xff) << 0)
+#       define AZ_ENDPOINT_REG_WRITE_EN                  (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA                        0x1781
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER          0x25
+#define                SPEAKER_ALLOCATION(x)                   (((x) & 0x7f) << 0)
+#define                SPEAKER_ALLOCATION_MASK                 (0x7f << 0)
+#define                SPEAKER_ALLOCATION_SHIFT                0
+#define                HDMI_CONNECTION                         (1 << 16)
+#define                DP_CONNECTION                           (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0        0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1        0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2        0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3        0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4        0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5        0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6        0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7        0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8        0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9        0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10       0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11       0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12       0x34 /* DST */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13       0x35 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
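+/* Illustrative example (derived from the bit layout above): an 8-channel
+ * LPCM sink supporting 32/44.1/48 kHz could be described as
+ *   MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x07)
+ * i.e. bits 0-2 of the frequency byte set.
+ */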
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC         0x37
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR             0x38
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0               0x3a
+#       define MANUFACTURER_ID(x)                        (((x) & 0xffff) << 0)
+#       define PRODUCT_ID(x)                             (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1               0x3b
+#       define SINK_DESCRIPTION_LEN(x)                   (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2               0x3c
+#       define PORT_ID0(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3               0x3d
+#       define PORT_ID1(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4               0x3e
+#       define DESCRIPTION0(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION1(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION2(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION3(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5               0x3f
+#       define DESCRIPTION4(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION5(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION6(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION7(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6               0x40
+#       define DESCRIPTION8(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION9(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION10(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION11(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7               0x41
+#       define DESCRIPTION12(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION13(x)                          (((x) & 0xff) << 8)
+#       define DESCRIPTION14(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION15(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8               0x42
+#       define DESCRIPTION16(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION17(x)                          (((x) & 0xff) << 8)
+
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL         0x54
+#       define AUDIO_ENABLED                             (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT  0x56
+#define                PORT_CONNECTIVITY_MASK                          (3 << 30)
+#define                PORT_CONNECTIVITY_SHIFT                         30
+
+#define        DC_LB_MEMORY_SPLIT                                      0x1AC3
+#define                DC_LB_MEMORY_CONFIG(x)                          ((x) << 20)
+
+#define        PRIORITY_A_CNT                                          0x1AC6
+#define                PRIORITY_MARK_MASK                              0x7fff
+#define                PRIORITY_OFF                                    (1 << 16)
+#define                PRIORITY_ALWAYS_ON                              (1 << 20)
+#define        PRIORITY_B_CNT                                          0x1AC7
+
+#define        DPG_PIPE_ARBITRATION_CONTROL3                           0x1B32
+#       define LATENCY_WATERMARK_MASK(x)                       ((x) << 16)
+#define        DPG_PIPE_LATENCY_CONTROL                                0x1B33
+#       define LATENCY_LOW_WATERMARK(x)                                ((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)                       ((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x1AEE
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x1AEF
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x1AD0
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x183D
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x183E
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x183F
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x1840
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x1853
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x1854
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x1A16
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define        GRPH_INT_CONTROL                                0x1A17
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define        DAC_AUTODETECT_INT_CONTROL                      0x19F2
+
+#define DC_HPD1_INT_STATUS                              0x1807
+#define DC_HPD2_INT_STATUS                              0x180A
+#define DC_HPD3_INT_STATUS                              0x180D
+#define DC_HPD4_INT_STATUS                              0x1810
+#define DC_HPD5_INT_STATUS                              0x1813
+#define DC_HPD6_INT_STATUS                              0x1816
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x1808
+#define DC_HPD2_INT_CONTROL                             0x180B
+#define DC_HPD3_INT_CONTROL                             0x180E
+#define DC_HPD4_INT_CONTROL                             0x1811
+#define DC_HPD5_INT_CONTROL                             0x1814
+#define DC_HPD6_INT_CONTROL                             0x1817
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x1809
+#define DC_HPD2_CONTROL                                   0x180C
+#define DC_HPD3_CONTROL                                   0x180F
+#define DC_HPD4_CONTROL                                   0x1812
+#define DC_HPD5_CONTROL                                   0x1815
+#define DC_HPD6_CONTROL                                   0x1818
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#define DPG_PIPE_STUTTER_CONTROL                          0x1B35
+#       define STUTTER_ENABLE                             (1 << 0)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x1BA6
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE                           0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL            (1 << 4)   /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE                           0x05b0
+#define DCCG_AUDIO_DTO0_MODULE                          0x05b4
+#define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+#define DCCG_AUDIO_DTO1_MODULE                          0x05c4
+
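+/* Illustrative sketch, not part of this change: pointing audio DTO0 at the
+ * CRTC driving the display.  The 3-bit width assumed for the source-select
+ * field follows from the crtc0-crtc5 range noted above.
+ */
+static inline void example_select_audio_dto(struct amdgpu_device *adev, u32 crtc_id)
+{
+        u32 v = RREG32(DCCG_AUDIO_DTO_SOURCE);
+
+        v &= ~DCCG_AUDIO_DTO_SEL;                /* 0 = use dto0 */
+        v &= ~DCCG_AUDIO_DTO0_SOURCE_SEL(7);     /* clear assumed 3-bit field */
+        v |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc_id);
+        WREG32(DCCG_AUDIO_DTO_SOURCE, v);
+}
+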
+#define AFMT_AUDIO_SRC_CONTROL                          0x1c4f
+#define                AFMT_AUDIO_SRC_SELECT(x)                (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
+#define        GRBM_CNTL                                       0x2000
+#define                GRBM_READ_TIMEOUT(x)                            ((x) << 0)
+
+#define        GRBM_STATUS2                                    0x2002
+#define                RLC_RQ_PENDING                                  (1 << 0)
+#define                RLC_BUSY                                        (1 << 8)
+#define                TC_BUSY                                         (1 << 9)
+
+#define        GRBM_STATUS                                     0x2004
+#define                CMDFIFO_AVAIL_MASK                              0x0000000F
+#define                RING2_RQ_PENDING                                (1 << 4)
+#define                SRBM_RQ_PENDING                                 (1 << 5)
+#define                RING1_RQ_PENDING                                (1 << 6)
+#define                CF_RQ_PENDING                                   (1 << 7)
+#define                PF_RQ_PENDING                                   (1 << 8)
+#define                GDS_DMA_RQ_PENDING                              (1 << 9)
+#define                GRBM_EE_BUSY                                    (1 << 10)
+#define                DB_CLEAN                                        (1 << 12)
+#define                CB_CLEAN                                        (1 << 13)
+#define                TA_BUSY                                         (1 << 14)
+#define                GDS_BUSY                                        (1 << 15)
+#define                VGT_BUSY                                        (1 << 17)
+#define                IA_BUSY_NO_DMA                                  (1 << 18)
+#define                IA_BUSY                                         (1 << 19)
+#define                SX_BUSY                                         (1 << 20)
+#define                SPI_BUSY                                        (1 << 22)
+#define                BCI_BUSY                                        (1 << 23)
+#define                SC_BUSY                                         (1 << 24)
+#define                PA_BUSY                                         (1 << 25)
+#define                DB_BUSY                                         (1 << 26)
+#define                CP_COHERENCY_BUSY                               (1 << 28)
+#define                CP_BUSY                                         (1 << 29)
+#define                CB_BUSY                                         (1 << 30)
+#define                GUI_ACTIVE                                      (1 << 31)
+#define        GRBM_STATUS_SE0                                 0x2005
+#define        GRBM_STATUS_SE1                                 0x2006
+#define                SE_DB_CLEAN                                     (1 << 1)
+#define                SE_CB_CLEAN                                     (1 << 2)
+#define                SE_BCI_BUSY                                     (1 << 22)
+#define                SE_VGT_BUSY                                     (1 << 23)
+#define                SE_PA_BUSY                                      (1 << 24)
+#define                SE_TA_BUSY                                      (1 << 25)
+#define                SE_SX_BUSY                                      (1 << 26)
+#define                SE_SPI_BUSY                                     (1 << 27)
+#define                SE_SC_BUSY                                      (1 << 29)
+#define                SE_DB_BUSY                                      (1 << 30)
+#define                SE_CB_BUSY                                      (1 << 31)
+
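+/* Illustrative sketch, not part of this change: the usual wait-for-idle poll
+ * on GRBM_STATUS.  GUI_ACTIVE covers the whole gfx pipeline; udelay() and
+ * -ETIMEDOUT come from the usual kernel headers.
+ */
+static inline int example_wait_gfx_idle(struct amdgpu_device *adev)
+{
+        unsigned int i;
+
+        for (i = 0; i < 100000; i++) {
+                if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+                        return 0;
+                udelay(1);
+        }
+        return -ETIMEDOUT;
+}
+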
+#define        GRBM_SOFT_RESET                                 0x2008
+#define                SOFT_RESET_CP                                   (1 << 0)
+#define                SOFT_RESET_CB                                   (1 << 1)
+#define                SOFT_RESET_RLC                                  (1 << 2)
+#define                SOFT_RESET_DB                                   (1 << 3)
+#define                SOFT_RESET_GDS                                  (1 << 4)
+#define                SOFT_RESET_PA                                   (1 << 5)
+#define                SOFT_RESET_SC                                   (1 << 6)
+#define                SOFT_RESET_BCI                                  (1 << 7)
+#define                SOFT_RESET_SPI                                  (1 << 8)
+#define                SOFT_RESET_SX                                   (1 << 10)
+#define                SOFT_RESET_TC                                   (1 << 11)
+#define                SOFT_RESET_TA                                   (1 << 12)
+#define                SOFT_RESET_VGT                                  (1 << 14)
+#define                SOFT_RESET_IA                                   (1 << 15)
+
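+/* Illustrative sketch, not part of this change: the classic GRBM soft-reset
+ * sequence - assert the per-block bits (e.g. SOFT_RESET_CP | SOFT_RESET_VGT),
+ * post the write with a read-back, settle, then release.
+ */
+static inline void example_grbm_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
+{
+        WREG32(GRBM_SOFT_RESET, reset_mask);
+        RREG32(GRBM_SOFT_RESET);
+        udelay(50);
+        WREG32(GRBM_SOFT_RESET, 0);
+        RREG32(GRBM_SOFT_RESET);
+        udelay(50);
+}
+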
+#define GRBM_GFX_INDEX                                 0x200B
+#define                INSTANCE_INDEX(x)                       ((x) << 0)
+#define                SH_INDEX(x)                             ((x) << 8)
+#define                SE_INDEX(x)                             ((x) << 16)
+#define                SH_BROADCAST_WRITES                     (1 << 29)
+#define                INSTANCE_BROADCAST_WRITES               (1 << 30)
+#define                SE_BROADCAST_WRITES                     (1 << 31)
+
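+/* Illustrative sketch, not part of this change: steering per-instance
+ * register accesses to one SE/SH via GRBM_GFX_INDEX, then restoring
+ * broadcast mode, as the gfx code does around rb/cu queries.
+ */
+static inline void example_select_se_sh(struct amdgpu_device *adev, u32 se, u32 sh)
+{
+        WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES |
+               SE_INDEX(se) | SH_INDEX(sh));
+        /* ... per-SE/SH register accesses go here ... */
+        WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES |
+               SH_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+}
+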
+#define GRBM_INT_CNTL                                   0x2018
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define        CP_STRMOUT_CNTL                                 0x213F
+#define        SCRATCH_REG0                                    0x2140
+#define        SCRATCH_REG1                                    0x2141
+#define        SCRATCH_REG2                                    0x2142
+#define        SCRATCH_REG3                                    0x2143
+#define        SCRATCH_REG4                                    0x2144
+#define        SCRATCH_REG5                                    0x2145
+#define        SCRATCH_REG6                                    0x2146
+#define        SCRATCH_REG7                                    0x2147
+
+#define        SCRATCH_UMSK                                    0x2150
+#define        SCRATCH_ADDR                                    0x2151
+
+#define        CP_SEM_WAIT_TIMER                               0x216F
+
+#define        CP_SEM_INCOMPLETE_TIMER_CNTL                    0x2172
+
+#define CP_ME_CNTL                                     0x21B6
+#define                CP_CE_HALT                                      (1 << 24)
+#define                CP_PFP_HALT                                     (1 << 26)
+#define                CP_ME_HALT                                      (1 << 28)
+
+#define        CP_COHER_CNTL2                                  0x217A
+
+#define        CP_RB2_RPTR                                     0x21BE
+#define        CP_RB1_RPTR                                     0x21BF
+#define        CP_RB0_RPTR                                     0x21C0
+#define        CP_RB_WPTR_DELAY                                0x21C1
+
+#define        CP_QUEUE_THRESHOLDS                             0x21D8
+#define                ROQ_IB1_START(x)                                ((x) << 0)
+#define                ROQ_IB2_START(x)                                ((x) << 8)
+#define CP_MEQ_THRESHOLDS                              0x21D9
+#define                MEQ1_START(x)                           ((x) << 0)
+#define                MEQ2_START(x)                           ((x) << 8)
+
+#define        CP_PERFMON_CNTL                                 0x21FF
+
+#define        VGT_VTX_VECT_EJECT_REG                          0x222C
+
+#define        VGT_CACHE_INVALIDATION                          0x2231
+#define                CACHE_INVALIDATION(x)                           ((x) << 0)
+#define                        VC_ONLY                                         0
+#define                        TC_ONLY                                         1
+#define                        VC_AND_TC                                       2
+#define                AUTO_INVLD_EN(x)                                ((x) << 6)
+#define                        NO_AUTO                                         0
+#define                        ES_AUTO                                         1
+#define                        GS_AUTO                                         2
+#define                        ES_AND_GS_AUTO                                  3
+#define        VGT_ESGS_RING_SIZE                              0x2232
+#define        VGT_GSVS_RING_SIZE                              0x2233
+
+#define        VGT_GS_VERTEX_REUSE                             0x2235
+
+#define        VGT_PRIMITIVE_TYPE                              0x2256
+#define        VGT_INDEX_TYPE                                  0x2257
+
+#define        VGT_NUM_INDICES                                 0x225C
+#define        VGT_NUM_INSTANCES                               0x225D
+
+#define        VGT_TF_RING_SIZE                                0x2262
+
+#define        VGT_HS_OFFCHIP_PARAM                            0x226C
+
+#define        VGT_TF_MEMORY_BASE                              0x226E
+
+#define CC_GC_SHADER_ARRAY_CONFIG                      0x226F
+#define                INACTIVE_CUS_MASK                       0xFFFF0000
+#define                INACTIVE_CUS_SHIFT                      16
+#define GC_USER_SHADER_ARRAY_CONFIG                    0x2270
+
+#define        PA_CL_ENHANCE                                   0x2285
+#define                CLIP_VTX_REORDER_ENA                            (1 << 0)
+#define                NUM_CLIP_SEQ(x)                                 ((x) << 1)
+
+#define        PA_SU_LINE_STIPPLE_VALUE                        0x2298
+
+#define        PA_SC_LINE_STIPPLE_STATE                        0x22C4
+
+#define        PA_SC_FORCE_EOV_MAX_CNTS                        0x22C9
+#define                FORCE_EOV_MAX_CLK_CNT(x)                        ((x) << 0)
+#define                FORCE_EOV_MAX_REZ_CNT(x)                        ((x) << 16)
+
+#define        PA_SC_FIFO_SIZE                                 0x22F3
+#define                SC_FRONTEND_PRIM_FIFO_SIZE(x)                   ((x) << 0)
+#define                SC_BACKEND_PRIM_FIFO_SIZE(x)                    ((x) << 6)
+#define                SC_HIZ_TILE_FIFO_SIZE(x)                        ((x) << 15)
+#define                SC_EARLYZ_TILE_FIFO_SIZE(x)                     ((x) << 23)
+
+#define        PA_SC_ENHANCE                                   0x22FC
+
+#define        SQ_CONFIG                                       0x2300
+
+#define        SQC_CACHES                                      0x2302
+
+#define SQ_POWER_THROTTLE                               0x2396
+#define                MIN_POWER(x)                            ((x) << 0)
+#define                MIN_POWER_MASK                          (0x3fff << 0)
+#define                MIN_POWER_SHIFT                         0
+#define                MAX_POWER(x)                            ((x) << 16)
+#define                MAX_POWER_MASK                          (0x3fff << 16)
+#define                MAX_POWER_SHIFT                         16
+#define SQ_POWER_THROTTLE2                              0x2397
+#define                MAX_POWER_DELTA(x)                      ((x) << 0)
+#define                MAX_POWER_DELTA_MASK                    (0x3fff << 0)
+#define                MAX_POWER_DELTA_SHIFT                   0
+#define                STI_SIZE(x)                             ((x) << 16)
+#define                STI_SIZE_MASK                           (0x3ff << 16)
+#define                STI_SIZE_SHIFT                          16
+#define                LTI_RATIO(x)                            ((x) << 27)
+#define                LTI_RATIO_MASK                          (0xf << 27)
+#define                LTI_RATIO_SHIFT                         27
+
+#define        SX_DEBUG_1                                      0x2418
+
+#define        SPI_STATIC_THREAD_MGMT_1                        0x2438
+#define        SPI_STATIC_THREAD_MGMT_2                        0x2439
+#define        SPI_STATIC_THREAD_MGMT_3                        0x243A
+#define        SPI_PS_MAX_WAVE_ID                              0x243B
+
+#define        SPI_CONFIG_CNTL                                 0x2440
+
+#define        SPI_CONFIG_CNTL_1                               0x244F
+#define                VTX_DONE_DELAY(x)                               ((x) << 0)
+#define                INTERP_ONE_PRIM_PER_ROW                         (1 << 4)
+
+#define        CGTS_TCC_DISABLE                                0x2452
+#define        CGTS_USER_TCC_DISABLE                           0x2453
+#define                TCC_DISABLE_MASK                                0xFFFF0000
+#define                TCC_DISABLE_SHIFT                               16
+#define        CGTS_SM_CTRL_REG                                0x2454
+#define                OVERRIDE                                (1 << 21)
+#define                LS_OVERRIDE                             (1 << 22)
+
+#define        SPI_LB_CU_MASK                                  0x24D5
+
+#define        TA_CNTL_AUX                                     0x2542
+
+#define CC_RB_BACKEND_DISABLE                          0x263D
+#define                BACKEND_DISABLE(x)                      ((x) << 16)
+#define GB_ADDR_CONFIG                                 0x263E
+#define                NUM_PIPES(x)                            ((x) << 0)
+#define                NUM_PIPES_MASK                          0x00000007
+#define                NUM_PIPES_SHIFT                         0
+#define                PIPE_INTERLEAVE_SIZE(x)                 ((x) << 4)
+#define                PIPE_INTERLEAVE_SIZE_MASK               0x00000070
+#define                PIPE_INTERLEAVE_SIZE_SHIFT              4
+#define                NUM_SHADER_ENGINES(x)                   ((x) << 12)
+#define                NUM_SHADER_ENGINES_MASK                 0x00003000
+#define                NUM_SHADER_ENGINES_SHIFT                12
+#define                SHADER_ENGINE_TILE_SIZE(x)              ((x) << 16)
+#define                SHADER_ENGINE_TILE_SIZE_MASK            0x00070000
+#define                SHADER_ENGINE_TILE_SIZE_SHIFT           16
+#define                NUM_GPUS(x)                             ((x) << 20)
+#define                NUM_GPUS_MASK                           0x00700000
+#define                NUM_GPUS_SHIFT                          20
+#define                MULTI_GPU_TILE_SIZE(x)                  ((x) << 24)
+#define                MULTI_GPU_TILE_SIZE_MASK                0x03000000
+#define                MULTI_GPU_TILE_SIZE_SHIFT               24
+#define                ROW_SIZE(x)                             ((x) << 28)
+#define                ROW_SIZE_MASK                           0x30000000
+#define                ROW_SIZE_SHIFT                          28
+
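+/* Illustrative sketch, not part of this change: the paired MASK/SHIFT macros
+ * above decode a GB_ADDR_CONFIG value read back from hardware.
+ */
+static inline u32 example_num_pipes(u32 gb_addr_config)
+{
+        return (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+}
+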
+#define        GB_TILE_MODE0                                   0x2644
+#       define MICRO_TILE_MODE(x)                              ((x) << 0)
+#              define  ADDR_SURF_DISPLAY_MICRO_TILING          0
+#              define  ADDR_SURF_THIN_MICRO_TILING             1
+#              define  ADDR_SURF_DEPTH_MICRO_TILING            2
+#       define ARRAY_MODE(x)                                   ((x) << 2)
+#              define  ARRAY_LINEAR_GENERAL                    0
+#              define  ARRAY_LINEAR_ALIGNED                    1
+#              define  ARRAY_1D_TILED_THIN1                    2
+#              define  ARRAY_2D_TILED_THIN1                    4
+#       define PIPE_CONFIG(x)                                  ((x) << 6)
+#              define  ADDR_SURF_P2                            0
+#              define  ADDR_SURF_P4_8x16                       4
+#              define  ADDR_SURF_P4_16x16                      5
+#              define  ADDR_SURF_P4_16x32                      6
+#              define  ADDR_SURF_P4_32x32                      7
+#              define  ADDR_SURF_P8_16x16_8x16                 8
+#              define  ADDR_SURF_P8_16x32_8x16                 9
+#              define  ADDR_SURF_P8_32x32_8x16                 10
+#              define  ADDR_SURF_P8_16x32_16x16                11
+#              define  ADDR_SURF_P8_32x32_16x16                12
+#              define  ADDR_SURF_P8_32x32_16x32                13
+#              define  ADDR_SURF_P8_32x64_32x32                14
+#       define TILE_SPLIT(x)                                   ((x) << 11)
+#              define  ADDR_SURF_TILE_SPLIT_64B                0
+#              define  ADDR_SURF_TILE_SPLIT_128B               1
+#              define  ADDR_SURF_TILE_SPLIT_256B               2
+#              define  ADDR_SURF_TILE_SPLIT_512B               3
+#              define  ADDR_SURF_TILE_SPLIT_1KB                4
+#              define  ADDR_SURF_TILE_SPLIT_2KB                5
+#              define  ADDR_SURF_TILE_SPLIT_4KB                6
+#       define BANK_WIDTH(x)                                   ((x) << 14)
+#              define  ADDR_SURF_BANK_WIDTH_1                  0
+#              define  ADDR_SURF_BANK_WIDTH_2                  1
+#              define  ADDR_SURF_BANK_WIDTH_4                  2
+#              define  ADDR_SURF_BANK_WIDTH_8                  3
+#       define BANK_HEIGHT(x)                                  ((x) << 16)
+#              define  ADDR_SURF_BANK_HEIGHT_1                 0
+#              define  ADDR_SURF_BANK_HEIGHT_2                 1
+#              define  ADDR_SURF_BANK_HEIGHT_4                 2
+#              define  ADDR_SURF_BANK_HEIGHT_8                 3
+#       define MACRO_TILE_ASPECT(x)                            ((x) << 18)
+#              define  ADDR_SURF_MACRO_ASPECT_1                0
+#              define  ADDR_SURF_MACRO_ASPECT_2                1
+#              define  ADDR_SURF_MACRO_ASPECT_4                2
+#              define  ADDR_SURF_MACRO_ASPECT_8                3
+#       define NUM_BANKS(x)                                    ((x) << 20)
+#              define  ADDR_SURF_2_BANK                        0
+#              define  ADDR_SURF_4_BANK                        1
+#              define  ADDR_SURF_8_BANK                        2
+#              define  ADDR_SURF_16_BANK                       3
+#define        GB_TILE_MODE1                                   0x2645
+#define        GB_TILE_MODE2                                   0x2646
+#define        GB_TILE_MODE3                                   0x2647
+#define        GB_TILE_MODE4                                   0x2648
+#define        GB_TILE_MODE5                                   0x2649
+#define        GB_TILE_MODE6                                   0x264a
+#define        GB_TILE_MODE7                                   0x264b
+#define        GB_TILE_MODE8                                   0x264c
+#define        GB_TILE_MODE9                                   0x264d
+#define        GB_TILE_MODE10                                  0x264e
+#define        GB_TILE_MODE11                                  0x264f
+#define        GB_TILE_MODE12                                  0x2650
+#define        GB_TILE_MODE13                                  0x2651
+#define        GB_TILE_MODE14                                  0x2652
+#define        GB_TILE_MODE15                                  0x2653
+#define        GB_TILE_MODE16                                  0x2654
+#define        GB_TILE_MODE17                                  0x2655
+#define        GB_TILE_MODE18                                  0x2656
+#define        GB_TILE_MODE19                                  0x2657
+#define        GB_TILE_MODE20                                  0x2658
+#define        GB_TILE_MODE21                                  0x2659
+#define        GB_TILE_MODE22                                  0x265a
+#define        GB_TILE_MODE23                                  0x265b
+#define        GB_TILE_MODE24                                  0x265c
+#define        GB_TILE_MODE25                                  0x265d
+#define        GB_TILE_MODE26                                  0x265e
+#define        GB_TILE_MODE27                                  0x265f
+#define        GB_TILE_MODE28                                  0x2660
+#define        GB_TILE_MODE29                                  0x2661
+#define        GB_TILE_MODE30                                  0x2662
+#define        GB_TILE_MODE31                                  0x2663
+
+#define        CB_PERFCOUNTER0_SELECT0                         0x2688
+#define        CB_PERFCOUNTER0_SELECT1                         0x2689
+#define        CB_PERFCOUNTER1_SELECT0                         0x268A
+#define        CB_PERFCOUNTER1_SELECT1                         0x268B
+#define        CB_PERFCOUNTER2_SELECT0                         0x268C
+#define        CB_PERFCOUNTER2_SELECT1                         0x268D
+#define        CB_PERFCOUNTER3_SELECT0                         0x268E
+#define        CB_PERFCOUNTER3_SELECT1                         0x268F
+
+#define        CB_CGTT_SCLK_CTRL                               0x2698
+
+#define        GC_USER_RB_BACKEND_DISABLE                      0x26DF
+#define                BACKEND_DISABLE_MASK                    0x00FF0000
+#define                BACKEND_DISABLE_SHIFT                   16
+
+#define        TCP_CHAN_STEER_LO                               0x2B03
+#define        TCP_CHAN_STEER_HI                               0x2B94
+
+#define        CP_RB0_BASE                                     0x3040
+#define        CP_RB0_CNTL                                     0x3041
+#define                RB_BUFSZ(x)                                     ((x) << 0)
+#define                RB_BLKSZ(x)                                     ((x) << 8)
+#define                BUF_SWAP_32BIT                                  (2 << 16)
+#define                RB_NO_UPDATE                                    (1 << 27)
+#define                RB_RPTR_WR_ENA                                  (1 << 31)
+
+#define        CP_RB0_RPTR_ADDR                                0x3043
+#define        CP_RB0_RPTR_ADDR_HI                             0x3044
+#define        CP_RB0_WPTR                                     0x3045
+
+#define        CP_PFP_UCODE_ADDR                               0x3054
+#define        CP_PFP_UCODE_DATA                               0x3055
+#define        CP_ME_RAM_RADDR                                 0x3056
+#define        CP_ME_RAM_WADDR                                 0x3057
+#define        CP_ME_RAM_DATA                                  0x3058
+
+#define        CP_CE_UCODE_ADDR                                0x305A
+#define        CP_CE_UCODE_DATA                                0x305B
+
+#define        CP_RB1_BASE                                     0x3060
+#define        CP_RB1_CNTL                                     0x3061
+#define        CP_RB1_RPTR_ADDR                                0x3062
+#define        CP_RB1_RPTR_ADDR_HI                             0x3063
+#define        CP_RB1_WPTR                                     0x3064
+#define        CP_RB2_BASE                                     0x3065
+#define        CP_RB2_CNTL                                     0x3066
+#define        CP_RB2_RPTR_ADDR                                0x3067
+#define        CP_RB2_RPTR_ADDR_HI                             0x3068
+#define        CP_RB2_WPTR                                     0x3069
+#define CP_INT_CNTL_RING0                               0x306A
+#define CP_INT_CNTL_RING1                               0x306B
+#define CP_INT_CNTL_RING2                               0x306C
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
+#define CP_INT_STATUS_RING0                             0x306D
+#define CP_INT_STATUS_RING1                             0x306E
+#define CP_INT_STATUS_RING2                             0x306F
+#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1 << 31)
+
+#define        CP_MEM_SLP_CNTL                                 0x3079
+#       define CP_MEM_LS_EN                             (1 << 0)
+
+#define        CP_DEBUG                                        0x307F
+
+#define RLC_CNTL                                          0x30C0
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_RL_BASE                                       0x30C1
+#define RLC_RL_SIZE                                       0x30C2
+#define RLC_LB_CNTL                                       0x30C3
+#       define LOAD_BALANCE_ENABLE                        (1 << 0)
+#define RLC_SAVE_AND_RESTORE_BASE                         0x30C4
+#define RLC_LB_CNTR_MAX                                   0x30C5
+#define RLC_LB_CNTR_INIT                                  0x30C6
+
+#define RLC_CLEAR_STATE_RESTORE_BASE                      0x30C8
+
+#define RLC_UCODE_ADDR                                    0x30CB
+#define RLC_UCODE_DATA                                    0x30CC
+
+#define RLC_GPU_CLOCK_COUNT_LSB                           0x30CE
+#define RLC_GPU_CLOCK_COUNT_MSB                           0x30CF
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0x30D0
+#define RLC_MC_CNTL                                       0x30D1
+#define RLC_UCODE_CNTL                                    0x30D2
+#define RLC_STAT                                          0x30D3
+#       define RLC_BUSY_STATUS                            (1 << 0)
+#       define GFX_POWER_STATUS                           (1 << 1)
+#       define GFX_CLOCK_STATUS                           (1 << 2)
+#       define GFX_LS_STATUS                              (1 << 3)
+
+#define        RLC_PG_CNTL                                     0x30D7
+#      define GFX_PG_ENABLE                            (1 << 0)
+#      define GFX_PG_SRC                               (1 << 1)
+
+#define        RLC_CGTT_MGCG_OVERRIDE                          0x3100
+#define        RLC_CGCG_CGLS_CTRL                              0x3101
+#      define CGCG_EN                                  (1 << 0)
+#      define CGLS_EN                                  (1 << 1)
+
+#define        RLC_TTOP_D                                      0x3105
+#      define RLC_PUD(x)                               ((x) << 0)
+#      define RLC_PUD_MASK                             (0xff << 0)
+#      define RLC_PDD(x)                               ((x) << 8)
+#      define RLC_PDD_MASK                             (0xff << 8)
+#      define RLC_TTPD(x)                              ((x) << 16)
+#      define RLC_TTPD_MASK                            (0xff << 16)
+#      define RLC_MSD(x)                               ((x) << 24)
+#      define RLC_MSD_MASK                             (0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK                               0x3107
+
+#define        RLC_PG_AO_CU_MASK                               0x310B
+#define        RLC_MAX_PG_CU                                   0x310C
+#      define MAX_PU_CU(x)                             ((x) << 0)
+#      define MAX_PU_CU_MASK                           (0xff << 0)
+#define        RLC_AUTO_PG_CTRL                                0x310D
+#      define AUTO_PG_EN                               (1 << 0)
+#      define GRBM_REG_SGIT(x)                         ((x) << 3)
+#      define GRBM_REG_SGIT_MASK                       (0xffff << 3)
+#      define PG_AFTER_GRBM_REG_ST(x)                  ((x) << 19)
+#      define PG_AFTER_GRBM_REG_ST_MASK                (0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0                       0x3115
+#define RLC_SERDES_WR_MASTER_MASK_1                       0x3116
+#define RLC_SERDES_WR_CTRL                                0x3117
+
+#define RLC_SERDES_MASTER_BUSY_0                          0x3119
+#define RLC_SERDES_MASTER_BUSY_1                          0x311A
+
+#define RLC_GCPM_GENERAL_3                                0x311E
+
+#define        DB_RENDER_CONTROL                               0xA000
+
+#define DB_DEPTH_INFO                                   0xA00F
+
+#define PA_SC_RASTER_CONFIG                             0xA0D4
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
+#define VGT_EVENT_INITIATOR                             0xA2A4
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL                                      0x10
+#       define LS2_EXIT_TIME(x)                           ((x) << 17)
+#       define LS2_EXIT_TIME_MASK                         (0x7 << 17)
+#       define LS2_EXIT_TIME_SHIFT                        17
+#define PB0_PIF_PAIRING                                   0x11
+#       define MULTI_PIF                                  (1 << 25)
+#define PB0_PIF_PWRDOWN_0                                 0x12
+#       define PLL_POWER_STATE_IN_TXS2_0(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_0_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_0(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_0_SHIFT             10
+#       define PLL_RAMP_UP_TIME_0(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_0_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_0_SHIFT                   24
+#define PB0_PIF_PWRDOWN_1                                 0x13
+#       define PLL_POWER_STATE_IN_TXS2_1(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_1_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_1(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_1_SHIFT             10
+#       define PLL_RAMP_UP_TIME_1(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_1_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_1_SHIFT                   24
+
+#define PB0_PIF_PWRDOWN_2                                 0x17
+#       define PLL_POWER_STATE_IN_TXS2_2(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_2_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_2(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_2_SHIFT             10
+#       define PLL_RAMP_UP_TIME_2(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_2_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_2_SHIFT                   24
+#define PB0_PIF_PWRDOWN_3                                 0x18
+#       define PLL_POWER_STATE_IN_TXS2_3(x)               ((x) << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_MASK             (0x7 << 7)
+#       define PLL_POWER_STATE_IN_TXS2_3_SHIFT            7
+#       define PLL_POWER_STATE_IN_OFF_3(x)                ((x) << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_MASK              (0x7 << 10)
+#       define PLL_POWER_STATE_IN_OFF_3_SHIFT             10
+#       define PLL_RAMP_UP_TIME_3(x)                      ((x) << 24)
+#       define PLL_RAMP_UP_TIME_3_MASK                    (0x7 << 24)
+#       define PLL_RAMP_UP_TIME_3_SHIFT                   24
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL                                      0x10
+#define PB1_PIF_PAIRING                                   0x11
+#define PB1_PIF_PWRDOWN_0                                 0x12
+#define PB1_PIF_PWRDOWN_1                                 0x13
+
+#define PB1_PIF_PWRDOWN_2                                 0x17
+#define PB1_PIF_PWRDOWN_3                                 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2                                        0x1c /* PCIE */
+#       define SLV_MEM_LS_EN                              (1 << 16)
+#       define SLV_MEM_AGGRESSIVE_LS_EN                   (1 << 17)
+#       define MST_MEM_LS_EN                              (1 << 18)
+#       define REPLAY_MEM_LS_EN                           (1 << 19)
+#define PCIE_LC_STATUS1                                   0x28 /* PCIE */
+#       define LC_REVERSE_RCVR                            (1 << 0)
+#       define LC_REVERSE_XMIT                            (1 << 1)
+#       define LC_OPERATING_LINK_WIDTH_MASK               (0x7 << 2)
+#       define LC_OPERATING_LINK_WIDTH_SHIFT              2
+#       define LC_DETECTED_LINK_WIDTH_MASK                (0x7 << 5)
+#       define LC_DETECTED_LINK_WIDTH_SHIFT               5
+
+#define PCIE_P_CNTL                                       0x40 /* PCIE */
+#       define P_IGNORE_EDB_ERR                           (1 << 6)
+
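+/* Illustrative sketch, not part of this change: the PIF and PCIE registers in
+ * this section sit behind index/data pairs (see the idx/data comments), not
+ * direct MMIO.  PCIE_INDEX/PCIE_DATA below are stand-in names for the
+ * driver's actual index/data registers.
+ */
+static inline u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+        WREG32(PCIE_INDEX, reg);        /* select the indirect register */
+        (void)RREG32(PCIE_INDEX);       /* post the index write */
+        return RREG32(PCIE_DATA);
+}
+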
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL                                      0xa0
+#       define LC_L0S_INACTIVITY(x)                       ((x) << 8)
+#       define LC_L0S_INACTIVITY_MASK                     (0xf << 8)
+#       define LC_L0S_INACTIVITY_SHIFT                    8
+#       define LC_L1_INACTIVITY(x)                        ((x) << 12)
+#       define LC_L1_INACTIVITY_MASK                      (0xf << 12)
+#       define LC_L1_INACTIVITY_SHIFT                     12
+#       define LC_PMI_TO_L1_DIS                           (1 << 16)
+#       define LC_ASPM_TO_L1_DIS                          (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#       define LC_DYN_LANES_PWR_STATE(x)                  ((x) << 21)
+#       define LC_DYN_LANES_PWR_STATE_MASK                (0x3 << 21)
+#       define LC_DYN_LANES_PWR_STATE_SHIFT               21
+#define PCIE_LC_N_FTS_CNTL                                0xa3 /* PCIE_P */
+#       define LC_XMIT_N_FTS(x)                           ((x) << 0)
+#       define LC_XMIT_N_FTS_MASK                         (0xff << 0)
+#       define LC_XMIT_N_FTS_SHIFT                        0
+#       define LC_XMIT_N_FTS_OVERRIDE_EN                  (1 << 8)
+#       define LC_N_FTS_MASK                              (0xff << 24)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_GEN3_EN_STRAP                           (1 << 1)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 2)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_MASK         (0x3 << 3)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT        3
+#       define LC_FORCE_EN_SW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_SW_SPEED_CHANGE               (1 << 6)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 7)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 8)
+#       define LC_INITIATE_LINK_SPEED_CHANGE              (1 << 9)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 10)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     10
+#       define LC_CURRENT_DATA_RATE_MASK                  (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+#       define LC_CURRENT_DATA_RATE_SHIFT                 13
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 16)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 18)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 19)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN3               (1 << 20)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN3                (1 << 21)
+
+#define PCIE_LC_CNTL2                                     0xb1
+#       define LC_ALLOW_PDWN_IN_L1                        (1 << 17)
+#       define LC_ALLOW_PDWN_IN_L23                       (1 << 18)
+
+#define PCIE_LC_CNTL3                                     0xb5 /* PCIE_P */
+#       define LC_GO_TO_RECOVERY                          (1 << 30)
+#define PCIE_LC_CNTL4                                     0xb6 /* PCIE_P */
+#       define LC_REDO_EQ                                 (1 << 5)
+#       define LC_SET_QUIESCE                             (1 << 13)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG                           0x3bd3
+#define UVD_UDEC_DB_ADDR_CONFIG                                0x3bd4
+#define UVD_UDEC_DBW_ADDR_CONFIG                       0x3bd5
+#define UVD_RBC_RB_RPTR                                        0x3da4
+#define UVD_RBC_RB_WPTR                                        0x3da5
+#define UVD_STATUS                                     0x3daf
+
+#define        UVD_CGC_CTRL                                    0x3dc2
+#      define DCM                                      (1 << 0)
+#      define CG_DT(x)                                 ((x) << 2)
+#      define CG_DT_MASK                               (0xf << 2)
+#      define CLK_OD(x)                                ((x) << 6)
+#      define CLK_OD_MASK                              (0x1f << 6)
+
+/* UVD CTX indirect */
+#define        UVD_CGC_MEM_CTRL                                0xC0
+#define        UVD_CGC_CTRL2                                   0xC1
+#      define DYN_OR_EN                                (1 << 0)
+#      define DYN_RR_EN                                (1 << 1)
+#      define G_DIV_ID(x)                              ((x) << 2)
+#      define G_DIV_ID_MASK                            (0x7 << 2)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)        ((RADEON_PACKET_TYPE0 << 30) |                  \
+                        (((reg) >> 2) & 0xFFFF) |                      \
+                        ((n) & 0x3FFF) << 16)
+#define CP_PACKET2                     0x80000000
+#define                PACKET2_PAD_SHIFT               0
+#define                PACKET2_PAD_MASK                (0x3fffffff << 0)
+
+#define PACKET2(v)     (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define RADEON_PACKET_TYPE3 3
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) |                  \
+                        (((op) & 0xFF) << 8) |                         \
+                        ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | (1 << 1))
+
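+/* Illustrative sketch, not part of this change: a type-3 header carries the
+ * packet type in bits 31:30, the payload dword count minus one in 29:16 and
+ * the opcode in 15:8, so PACKET3(PACKET3_NOP, 0) encodes as 0xC0001000.
+ */
+static inline u32 example_nop_header(void)
+{
+        return PACKET3(0x10 /* PACKET3_NOP, defined below */, 0);
+}
+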
+/* Packet 3 types */
+#define        PACKET3_NOP                                     0x10
+#define        PACKET3_SET_BASE                                0x11
+#define                PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define                        GDS_PARTITION_BASE              2
+#define                        CE_PARTITION_BASE               3
+#define        PACKET3_CLEAR_STATE                             0x12
+#define        PACKET3_INDEX_BUFFER_SIZE                       0x13
+#define        PACKET3_DISPATCH_DIRECT                         0x15
+#define        PACKET3_DISPATCH_INDIRECT                       0x16
+#define        PACKET3_ALLOC_GDS                               0x1B
+#define        PACKET3_WRITE_GDS_RAM                           0x1C
+#define        PACKET3_ATOMIC_GDS                              0x1D
+#define        PACKET3_ATOMIC                                  0x1E
+#define        PACKET3_OCCLUSION_QUERY                         0x1F
+#define        PACKET3_SET_PREDICATION                         0x20
+#define        PACKET3_REG_RMW                                 0x21
+#define        PACKET3_COND_EXEC                               0x22
+#define        PACKET3_PRED_EXEC                               0x23
+#define        PACKET3_DRAW_INDIRECT                           0x24
+#define        PACKET3_DRAW_INDEX_INDIRECT                     0x25
+#define        PACKET3_INDEX_BASE                              0x26
+#define        PACKET3_DRAW_INDEX_2                            0x27
+#define        PACKET3_CONTEXT_CONTROL                         0x28
+#define        PACKET3_INDEX_TYPE                              0x2A
+#define        PACKET3_DRAW_INDIRECT_MULTI                     0x2C
+#define        PACKET3_DRAW_INDEX_AUTO                         0x2D
+#define        PACKET3_DRAW_INDEX_IMMD                         0x2E
+#define        PACKET3_NUM_INSTANCES                           0x2F
+#define        PACKET3_DRAW_INDEX_MULTI_AUTO                   0x30
+#define        PACKET3_INDIRECT_BUFFER_CONST                   0x31
+#define        PACKET3_INDIRECT_BUFFER                         0x3F
+#define        PACKET3_STRMOUT_BUFFER_UPDATE                   0x34
+#define        PACKET3_DRAW_INDEX_OFFSET_2                     0x35
+#define        PACKET3_DRAW_INDEX_MULTI_ELEMENT                0x36
+#define        PACKET3_WRITE_DATA                              0x37
+#define                WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+                * 1 - memory (sync - via GRBM)
+                * 2 - tc/l2
+                * 3 - gds
+                * 4 - reserved
+                * 5 - memory (async - direct)
+                */
+#define                WR_ONE_ADDR                             (1 << 16)
+#define                WR_CONFIRM                              (1 << 20)
+#define                WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+                * 1 - pfp
+                * 2 - ce
+                */
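+
+/* Illustrative sketch, not part of this change: writing one dword to memory
+ * with WRITE_DATA via the driver's ring helpers.  Four payload dwords, so
+ * the header count is 3.
+ */
+static inline void example_emit_write_data(struct amdgpu_ring *ring,
+                                           u64 addr, u32 data)
+{
+        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+        amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+        amdgpu_ring_write(ring, lower_32_bits(addr));
+        amdgpu_ring_write(ring, upper_32_bits(addr));
+        amdgpu_ring_write(ring, data);
+}
+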
+#define        PACKET3_DRAW_INDEX_INDIRECT_MULTI               0x38
+#define        PACKET3_MEM_SEMAPHORE                           0x39
+#define        PACKET3_MPEG_INDEX                              0x3A
+#define        PACKET3_COPY_DW                                 0x3B
+#define        PACKET3_WAIT_REG_MEM                            0x3C
+#define                WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+                * 1 - <
+                * 2 - <=
+                * 3 - ==
+                * 4 - !=
+                * 5 - >=
+                * 6 - >
+                */
+#define                WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+                * 1 - mem
+                */
+#define                WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+                * 1 - pfp
+                */
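+
+/* Illustrative sketch, not part of this change: stalling the ME until a
+ * memory dword masked by 'mask' equals 'ref'.  Six payload dwords, so the
+ * header count is 5; the final dword is an assumed poll interval.
+ */
+static inline void example_emit_wait_mem_eq(struct amdgpu_ring *ring,
+                                            u64 addr, u32 ref, u32 mask)
+{
+        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+        amdgpu_ring_write(ring, WAIT_REG_MEM_FUNCTION(3) |      /* == */
+                                WAIT_REG_MEM_MEM_SPACE(1));     /* memory */
+        amdgpu_ring_write(ring, lower_32_bits(addr));
+        amdgpu_ring_write(ring, upper_32_bits(addr));
+        amdgpu_ring_write(ring, ref);
+        amdgpu_ring_write(ring, mask);
+        amdgpu_ring_write(ring, 0x10);
+}
+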
+#define        PACKET3_MEM_WRITE                               0x3D
+#define        PACKET3_COPY_DATA                               0x40
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+                * 1 - GDS
+                */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+                * 1 - PFP
+                */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                * 2 - DATA
+                */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
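+
+/* Illustrative sketch, not part of this change: the six-dword CP_DMA layout
+ * documented above, copying 'count' bytes from src to dst through the ME
+ * with CP_SYNC set so later packets wait for the copy.
+ */
+static inline void example_emit_cp_dma_copy(struct amdgpu_ring *ring,
+                                            u64 src, u64 dst, u32 count)
+{
+        amdgpu_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+        amdgpu_ring_write(ring, lower_32_bits(src));             /* SRC_ADDR_LO */
+        amdgpu_ring_write(ring, PACKET3_CP_DMA_CP_SYNC |
+                                (upper_32_bits(src) & 0xff));    /* SRC_ADDR_HI */
+        amdgpu_ring_write(ring, lower_32_bits(dst));             /* DST_ADDR_LO */
+        amdgpu_ring_write(ring, upper_32_bits(dst) & 0xff);      /* DST_ADDR_HI */
+        amdgpu_ring_write(ring, count & 0x1fffff);               /* BYTE_COUNT */
+}
+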
+#define        PACKET3_PFP_SYNC_ME                             0x42
+#define        PACKET3_SURFACE_SYNC                            0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define        PACKET3_ME_INITIALIZE                           0x44
+#define                PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define        PACKET3_COND_WRITE                              0x45
+#define        PACKET3_EVENT_WRITE                             0x46
+#define                EVENT_TYPE(x)                           ((x) << 0)
+#define                EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+                * 1 - ZPASS_DONE
+                * 2 - SAMPLE_PIPELINESTAT
+                * 3 - SAMPLE_STREAMOUTSTAT*
+                * 4 - *S_PARTIAL_FLUSH
+                * 5 - EOP events
+                * 6 - EOS events
+                * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+                */
+#define                INV_L2                                  (1 << 20)
+                /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define        PACKET3_EVENT_WRITE_EOP                         0x47
+#define                DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+                * 1 - send low 32bit data
+                * 2 - send 64bit data
+                * 3 - send 64bit counter value
+                */
+#define                INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+                * 1 - interrupt only (DATA_SEL = 0)
+                * 2 - interrupt when data write is confirmed
+                */
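+
+/* Illustrative sketch, not part of this change: a typical end-of-pipe fence -
+ * a bottom-of-pipe timestamp event that writes a 64-bit seqno (DATA_SEL(2))
+ * and raises an interrupt once the write is confirmed (INT_SEL(2)).
+ */
+static inline void example_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq)
+{
+        amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+        amdgpu_ring_write(ring, EVENT_TYPE(BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
+        amdgpu_ring_write(ring, lower_32_bits(addr));
+        amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+                                DATA_SEL(2) | INT_SEL(2));
+        amdgpu_ring_write(ring, lower_32_bits(seq));
+        amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+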
+#define        PACKET3_EVENT_WRITE_EOS                         0x48
+#define        PACKET3_PREAMBLE_CNTL                           0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define        PACKET3_ONE_REG_WRITE                           0x57
+#define        PACKET3_LOAD_CONFIG_REG                         0x5F
+#define        PACKET3_LOAD_CONTEXT_REG                        0x60
+#define        PACKET3_LOAD_SH_REG                             0x61
+#define        PACKET3_SET_CONFIG_REG                          0x68
+#define                PACKET3_SET_CONFIG_REG_START                    0x00002000
+#define                PACKET3_SET_CONFIG_REG_END                      0x00002c00
+#define        PACKET3_SET_CONTEXT_REG                         0x69
+#define                PACKET3_SET_CONTEXT_REG_START                   0x0000a000
+#define                PACKET3_SET_CONTEXT_REG_END                     0x0000a400
+#define        PACKET3_SET_CONTEXT_REG_INDIRECT                0x73
+#define        PACKET3_SET_RESOURCE_INDIRECT                   0x74
+#define        PACKET3_SET_SH_REG                              0x76
+#define                PACKET3_SET_SH_REG_START                        0x00002c00
+#define                PACKET3_SET_SH_REG_END                          0x00003000
+#define        PACKET3_SET_SH_REG_OFFSET                       0x77
+#define        PACKET3_ME_WRITE                                0x7A
+#define        PACKET3_SCRATCH_RAM_WRITE                       0x7D
+#define        PACKET3_SCRATCH_RAM_READ                        0x7E
+#define        PACKET3_CE_WRITE                                0x7F
+#define        PACKET3_LOAD_CONST_RAM                          0x80
+#define        PACKET3_WRITE_CONST_RAM                         0x81
+#define        PACKET3_WRITE_CONST_RAM_OFFSET                  0x82
+#define        PACKET3_DUMP_CONST_RAM                          0x83
+#define        PACKET3_INCREMENT_CE_COUNTER                    0x84
+#define        PACKET3_INCREMENT_DE_COUNTER                    0x85
+#define        PACKET3_WAIT_ON_CE_COUNTER                      0x86
+#define        PACKET3_WAIT_ON_DE_COUNTER                      0x87
+#define        PACKET3_WAIT_ON_DE_COUNTER_DIFF                 0x88
+#define        PACKET3_SET_CE_DE_COUNTERS                      0x89
+#define        PACKET3_WAIT_ON_AVAIL_BUFFER                    0x8A
+#define        PACKET3_SWITCH_BUFFER                           0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x200 /* not a register */
+
+#define DMA_RB_CNTL                                       0x3400
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0x3401
+#define DMA_RB_RPTR                                       0x3402
+#define DMA_RB_WPTR                                       0x3403
+
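+/* Illustrative sketch, not part of this change: both async DMA engines share
+ * one register layout, so code indexes them with the per-instance offsets
+ * defined above, e.g. when programming the ring buffer control register.
+ */
+static inline u32 example_dma_rb_cntl_reg(unsigned int instance)
+{
+        static const u32 dma_offsets[2] = {
+                DMA0_REGISTER_OFFSET,
+                DMA1_REGISTER_OFFSET,
+        };
+
+        return DMA_RB_CNTL + dma_offsets[instance];
+}
+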
+#define DMA_RB_RPTR_ADDR_HI                               0x3407
+#define DMA_RB_RPTR_ADDR_LO                               0x3408
+
+#define DMA_IB_CNTL                                       0x3409
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0x340a
+#define DMA_CNTL                                          0x340b
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0x340d
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_TILING_CONFIG                                0x342e
+
+#define        DMA_POWER_CNTL                                  0x342f
+#       define MEM_POWER_OVERRIDE                       (1 << 8)
+#define        DMA_CLK_CTRL                                    0x3430
+
+#define        DMA_PG                                          0x3435
+#      define PG_CNTL_ENABLE                           (1 << 0)
+#define        DMA_PGFSM_CONFIG                                0x3436
+#define        DMA_PGFSM_WRITE                                 0x3437
+
+#define DMA_PACKET(cmd, b, t, s, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((b) & 0x1) << 26) |          \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((vmid) & 0xF) << 20) |       \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)          ((2 << 28) |                    \
+                                        (1 << 26) |                    \
+                                        (1 << 21) |                    \
+                                        (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_POLL_REG_MEM                           0xe
+#define        DMA_PACKET_NOP                                    0xf
+
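The DMA_PACKET/DMA_IB_PACKET macros above pack a whole async-DMA packet header into one dword: the packet type from the list above lands in bits 31:28 and the dword count in bits 19:0. A minimal standalone sketch of composing such a header (the choice of a 16-dword constant fill is illustrative, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_PACKET(cmd, b, t, s, n)    ((((cmd) & 0xF) << 28) |   \
                                            (((b) & 0x1) << 26) |     \
                                            (((t) & 0x1) << 23) |     \
                                            (((s) & 0x1) << 22) |     \
                                            (((n) & 0xFFFFF) << 0))
    #define DMA_PACKET_CONSTANT_FILL       0xd

    int main(void)
    {
            /* Header for a constant-fill packet covering 16 dwords. */
            uint32_t hdr = DMA_PACKET(DMA_PACKET_CONSTANT_FILL, 0, 0, 0, 16);

            printf("0x%08x\n", hdr);  /* prints 0xd0000010 */
            return 0;
    }
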
+#define VCE_STATUS                                     0x20004
+#define VCE_VCPU_CNTL                                  0x20014
+#define                VCE_CLK_EN                              (1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0                         0x20024
+#define VCE_VCPU_CACHE_SIZE0                           0x20028
+#define VCE_VCPU_CACHE_OFFSET1                         0x2002c
+#define VCE_VCPU_CACHE_SIZE1                           0x20030
+#define VCE_VCPU_CACHE_OFFSET2                         0x20034
+#define VCE_VCPU_CACHE_SIZE2                           0x20038
+#define VCE_SOFT_RESET                                 0x20120
+#define        VCE_ECPU_SOFT_RESET                     (1 << 0)
+#define        VCE_FME_SOFT_RESET                      (1 << 2)
+#define VCE_RB_BASE_LO2                                        0x2016c
+#define VCE_RB_BASE_HI2                                        0x20170
+#define VCE_RB_SIZE2                                   0x20174
+#define VCE_RB_RPTR2                                   0x20178
+#define VCE_RB_WPTR2                                   0x2017c
+#define VCE_RB_BASE_LO                                 0x20180
+#define VCE_RB_BASE_HI                                 0x20184
+#define VCE_RB_SIZE                                    0x20188
+#define VCE_RB_RPTR                                    0x2018c
+#define VCE_RB_WPTR                                    0x20190
+#define VCE_CLOCK_GATING_A                             0x202f8
+#define VCE_CLOCK_GATING_B                             0x202fc
+#define VCE_UENC_CLOCK_GATING                          0x205bc
+#define VCE_UENC_REG_CLOCK_GATING                      0x205c0
+#define VCE_FW_REG_STATUS                              0x20e10
+#      define VCE_FW_REG_STATUS_BUSY                   (1 << 0)
+#      define VCE_FW_REG_STATUS_PASS                   (1 << 3)
+#      define VCE_FW_REG_STATUS_DONE                   (1 << 11)
+#define VCE_LMI_FW_START_KEYSEL                                0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL                       0x20e20
+#define VCE_LMI_CTRL2                                  0x20e74
+#define VCE_LMI_CTRL                                   0x20e98
+#define VCE_LMI_VM_CTRL                                        0x20ea0
+#define VCE_LMI_SWAP_CNTL                              0x20eb4
+#define VCE_LMI_SWAP_CNTL1                             0x20eb8
+#define VCE_LMI_CACHE_CTRL                             0x20ef4
+
+#define VCE_CMD_NO_OP                                  0x00000000
+#define VCE_CMD_END                                    0x00000001
+#define VCE_CMD_IB                                     0x00000002
+#define VCE_CMD_FENCE                                  0x00000003
+#define VCE_CMD_TRAP                                   0x00000004
+#define VCE_CMD_IB_AUTO                                        0x00000005
+#define VCE_CMD_SEMAPHORE                              0x00000006
+
+
+/* DCE stuff */
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define SI_CRTC0_REGISTER_OFFSET                0 //(0x6df0 - 0x6df0)/4
+#define SI_CRTC1_REGISTER_OFFSET                0x300 //(0x79f0 - 0x6df0)/4
+#define SI_CRTC2_REGISTER_OFFSET                0x2600 //(0x105f0 - 0x6df0)/4
+#define SI_CRTC3_REGISTER_OFFSET                0x2900 //(0x111f0 - 0x6df0)/4
+#define SI_CRTC4_REGISTER_OFFSET                0x2c00 //(0x11df0 - 0x6df0)/4
+#define SI_CRTC5_REGISTER_OFFSET                0x2f00 //(0x129f0 - 0x6df0)/4
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define AMDGPU_MM_INDEX                        0x0000
+#define AMDGPU_MM_DATA                 0x0001
+
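AMDGPU_MM_INDEX/AMDGPU_MM_DATA are the classic index/data pair: a register beyond the directly mapped MMIO window is reached by writing its byte offset to MM_INDEX and then accessing MM_DATA. A toy sketch of the pattern; the flat mmio[] array merely stands in for the real BAR mapping and does not model actual hardware indirection:

    #include <stdint.h>
    #include <stdio.h>

    #define AMDGPU_MM_INDEX 0x0000
    #define AMDGPU_MM_DATA  0x0001

    static uint32_t mmio[256];  /* toy register window */

    static void write32(uint32_t reg, uint32_t val) { mmio[reg] = val; }
    static uint32_t read32(uint32_t reg) { return mmio[reg]; }

    /* Registers past the mapped window go through the index/data pair:
     * post the byte offset to MM_INDEX, then read MM_DATA. */
    static uint32_t indirect_read32(uint32_t reg)
    {
            write32(AMDGPU_MM_INDEX, reg * 4);
            return read32(AMDGPU_MM_DATA);
    }

    int main(void)
    {
            write32(AMDGPU_MM_DATA, 0x12345678);        /* pretend latch */
            printf("0x%08x\n", indirect_read32(0x81a)); /* MC_VM_FB_OFFSET */
            return 0;
    }
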
+#define VERDE_NUM_CRTC 6
+#define        BLACKOUT_MODE_MASK                      0x00000007
+#define        VGA_RENDER_CONTROL                      0xC0
+#define R_000300_VGA_RENDER_CONTROL             0xC0
+#define C_000300_VGA_VSTATUS_CNTL               0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS                   0x1BA3
+#define EVERGREEN_CRTC_V_BLANK                  (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION          0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x1b8d
+#define EVERGREEN_CRTC_CONTROL                          0x1b9c
+#define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x1b9d
+#define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x1a05
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+
+#define EVERGREEN_DATA_FORMAT                           0x1ac0
+#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
+
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
+
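The _MASK/__SHIFT pairs used throughout this header all decode the same way: field = (reg & MASK) >> SHIFT. A quick check with the channel-count field above (the sample readback value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define MC_SHARED_CHMAP__NOOFCHAN_MASK   0xf000
    #define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc

    int main(void)
    {
            uint32_t chmap = 0x3000;  /* hypothetical register readback */
            uint32_t chans = (chmap & MC_SHARED_CHMAP__NOOFCHAN_MASK) >>
                             MC_SHARED_CHMAP__NOOFCHAN__SHIFT;

            printf("%u\n", chans);    /* prints 3 */
            return 0;
    }
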
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL            (0 << 20)
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED            (1 << 20)
+#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1            (2 << 20)
+#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1            (4 << 20)
+
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1a45
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x1845
+
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1847
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x1a47
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
+
+#define R600_D1GRPH_SWAP_CONTROL                               0x1843
+#define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
+
+#define AVIVO_D1VGA_CONTROL                                    0x00cc
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE            (1 << 0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT          (1 << 8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT   (1 << 9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN      (1 << 16)
+#       define AVIVO_DVGA_CONTROL_ROTATE                 (1 << 24)
+#define AVIVO_D2VGA_CONTROL                                    0x00ce
+
+#define R600_BUS_CNTL                                           0x1508
+#       define R600_BIOS_ROM_DIS                                (1 << 1)
+
+#define R600_ROM_CNTL                              0x580
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+
+#define FMT_BIT_DEPTH_CONTROL                0x1bf2
+#define FMT_TRUNCATE_EN               (1 << 0)
+#define FMT_TRUNCATE_DEPTH            (1 << 4)
+#define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#define FMT_TEMPORAL_LEVEL            (1 << 24)
+#define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#define FMT_25FRC_SEL(x)              ((x) << 26)
+#define FMT_50FRC_SEL(x)              ((x) << 28)
+#define FMT_75FRC_SEL(x)              ((x) << 30)
+
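The FMT_* dithering fields above are plain OR-together bits, so a bit-depth-control value is built by composition. A short sketch (the particular dither configuration is illustrative, not from this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define FMT_SPATIAL_DITHER_EN      (1 << 8)
    #define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
    #define FMT_SPATIAL_DITHER_DEPTH   (1 << 12)

    int main(void)
    {
            /* Spatial dithering enabled, mode 0, depth bit set. */
            uint32_t fmt = FMT_SPATIAL_DITHER_EN |
                           FMT_SPATIAL_DITHER_MODE(0) |
                           FMT_SPATIAL_DITHER_DEPTH;

            printf("0x%08x\n", fmt);  /* prints 0x00001100 */
            return 0;
    }
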
+#define EVERGREEN_DC_LUT_CONTROL                        0x1a80
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x1a81
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x1a82
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x1a83
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x1a84
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x1a85
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x1a86
+#define EVERGREEN_DC_LUT_30_COLOR                       0x1a7c
+#define EVERGREEN_DC_LUT_RW_INDEX                       0x1a79
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x1a7e
+#define EVERGREEN_DC_LUT_RW_MODE                        0x1a78
+
+#define EVERGREEN_GRPH_ENABLE                           0x1a00
+#define EVERGREEN_GRPH_CONTROL                          0x1a01
+#define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#define EVERGREEN_GRPH_DEPTH_8BPP                0
+#define EVERGREEN_GRPH_DEPTH_16BPP               1
+#define EVERGREEN_GRPH_DEPTH_32BPP               2
+#define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#define EVERGREEN_ADDR_SURF_2_BANK               0
+#define EVERGREEN_ADDR_SURF_4_BANK               1
+#define EVERGREEN_ADDR_SURF_8_BANK               2
+#define EVERGREEN_ADDR_SURF_16_BANK              3
+#define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
+#define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+
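+/* 8 BPP */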
+#define EVERGREEN_GRPH_FORMAT_INDEXED            0
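+/* 16 BPP */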
+#define EVERGREEN_GRPH_FORMAT_ARGB1555           0
+#define EVERGREEN_GRPH_FORMAT_ARGB565            1
+#define EVERGREEN_GRPH_FORMAT_ARGB4444           2
+#define EVERGREEN_GRPH_FORMAT_AI88               3
+#define EVERGREEN_GRPH_FORMAT_MONO16             4
+#define EVERGREEN_GRPH_FORMAT_BGRA5551           5
+
+/* 32 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB8888           0
+#define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
+#define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
+#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
+#define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
+#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
+#define EVERGREEN_GRPH_FORMAT_RGB111110          6
+#define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
+#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
+#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
+#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
+
+#define EVERGREEN_GRPH_SWAP_CONTROL                     0x1a03
+#define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_ENDIAN_NONE               0
+#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
+#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
+#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
+
+#define EVERGREEN_D3VGA_CONTROL                         0xf8
+#define EVERGREEN_D4VGA_CONTROL                         0xf9
+#define EVERGREEN_D5VGA_CONTROL                         0xfa
+#define EVERGREEN_D6VGA_CONTROL                         0xfb
+
+#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
+
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL         0x1a02
+#define EVERGREEN_LUT_10BIT_BYPASS_EN            (1 << 8)
+
+#define EVERGREEN_GRPH_PITCH                            0x1a06
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x1a08
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x1a09
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x1a0a
+#define EVERGREEN_GRPH_X_START                          0x1a0b
+#define EVERGREEN_GRPH_Y_START                          0x1a0c
+#define EVERGREEN_GRPH_X_END                            0x1a0d
+#define EVERGREEN_GRPH_Y_END                            0x1a0e
+#define EVERGREEN_GRPH_UPDATE                           0x1a11
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x1a12
+#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+#define EVERGREEN_VIEWPORT_START                        0x1b5c
+#define EVERGREEN_VIEWPORT_SIZE                         0x1b5d
+#define EVERGREEN_DESKTOP_HEIGHT                        0x1ac1
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL                           0x1a66
+#       define EVERGREEN_CURSOR_EN                      (1 << 0)
+#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define EVERGREEN_CURSOR_MONO                    0
+#       define EVERGREEN_CURSOR_24_1                    1
+#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
+#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
+#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
+#       define EVERGREEN_CURSOR_URGENT_1_8              1
+#       define EVERGREEN_CURSOR_URGENT_1_4              2
+#       define EVERGREEN_CURSOR_URGENT_3_8              3
+#       define EVERGREEN_CURSOR_URGENT_1_2              4
+#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x1a67
+#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define EVERGREEN_CUR_SIZE                              0x1a68
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x1a69
+#define EVERGREEN_CUR_POSITION                          0x1a6a
+#define EVERGREEN_CUR_HOT_SPOT                          0x1a6b
+#define EVERGREEN_CUR_COLOR1                            0x1a6c
+#define EVERGREEN_CUR_COLOR2                            0x1a6d
+#define EVERGREEN_CUR_UPDATE                            0x1a6e
+#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
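The cursor-control bits above compose the same way. For instance, a 64x64 premultiplied-alpha ARGB cursor (matching CURSOR_WIDTH/CURSOR_HEIGHT earlier) with urgency at half the FIFO would plausibly be programmed as below; a sketch, not lifted from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define EVERGREEN_CURSOR_EN                (1 << 0)
    #define EVERGREEN_CURSOR_MODE(x)           (((x) & 0x3) << 8)
    #define EVERGREEN_CURSOR_24_8_PRE_MULT     2
    #define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
    #define EVERGREEN_CURSOR_URGENT_1_2        4

    int main(void)
    {
            uint32_t cur = EVERGREEN_CURSOR_EN |
                           EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
                           EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2);

            printf("0x%08x\n", cur);  /* prints 0x04000201 */
            return 0;
    }
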
+
+#define NI_INPUT_CSC_CONTROL                           0x1a35
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x1a3c
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x1a58
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x1a59
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x1aa0
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x1a2d
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x1a31
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_GAMMA_CONTROL                         0x1a10
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define IH_RB_WPTR__RB_OVERFLOW_MASK   0x1
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000
+#define SRBM_STATUS__IH_BUSY_MASK      0x20000
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK    0x400
+
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
+
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
+
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
+
+#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
+#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
+#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
+#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
+
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
+
+#define MC_SEQ_MISC0__MT__MASK 0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
+#define MC_SEQ_MISC0__MT__DDR2   0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
+#define MC_SEQ_MISC0__MT__HBM    0x60000000
+#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
+
+#define SRBM_STATUS__MCB_BUSY_MASK 0x200
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x9
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x400
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0xa
+#define SRBM_STATUS__MCC_BUSY_MASK 0x800
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0xb
+#define SRBM_STATUS__MCD_BUSY_MASK 0x1000
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0xc
+#define SRBM_STATUS__VMC_BUSY_MASK 0x100
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x8
+
+
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
+#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
+#define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL     (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT       (0x7 << 29)
+
+#define CONFIG_CNTL    0x1509
+#define CC_DRM_ID_STRAPS       0x1559
+#define AMDGPU_PCIE_INDEX      0xc
+#define AMDGPU_PCIE_DATA       0xd
+
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0x3411
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0x3412
+#define DMA_MODE                                          0x342f
+#define DMA_BUSY_MASK 0x20
+#define DMA1_BUSY_MASK 0x40
+#define SDMA_MAX_INSTANCE 2
+
+#define PCIE_BUS_CLK    10000
+#define TCLK            (PCIE_BUS_CLK / 10)
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK              0xf0000000
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define        PCIE_PORT_INDEX                                 0xe
+#define        PCIE_PORT_DATA                                  0xf
+#define EVERGREEN_PIF_PHY0_INDEX                        0x8
+#define EVERGREEN_PIF_PHY0_DATA                         0xc
+#define EVERGREEN_PIF_PHY1_INDEX                        0x10
+#define EVERGREEN_PIF_PHY1_DATA                                0x14
+
+#define        MC_VM_FB_OFFSET                                 0x81a
+
+#endif
index 3493da5..4a4d379 100644 (file)
@@ -494,6 +494,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
@@ -526,6 +527,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
   union
   {
     ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
     ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
   };
   UCHAR   ucRefDiv;                           //Output Parameter
index b86aba9..6aa8938 100644 (file)
@@ -119,6 +119,8 @@ enum cgs_system_info_id {
        CGS_SYSTEM_INFO_PG_FLAGS,
        CGS_SYSTEM_INFO_GFX_CU_INFO,
        CGS_SYSTEM_INFO_GFX_SE_INFO,
+       CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
+       CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
        CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
index 2de34a5..b1d1940 100644 (file)
@@ -538,7 +538,6 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
                ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
                break;
        case AMD_PP_EVENT_READJUST_POWER_STATE:
-               pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
                ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
                break;
        default:
@@ -765,15 +764,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
        PP_CHECK_HW(hwmgr);
 
        if (!hwmgr->hardcode_pp_table) {
-               hwmgr->hardcode_pp_table =
-                               kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+               hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+                                                  hwmgr->soft_pp_table_size,
+                                                  GFP_KERNEL);
 
                if (!hwmgr->hardcode_pp_table)
                        return -ENOMEM;
-
-               /* to avoid powerplay crash when hardcode pptable is empty */
-               memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
-                               hwmgr->soft_pp_table_size);
        }
 
        memcpy(hwmgr->hardcode_pp_table, buf, size);
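The hunk above folds a kzalloc-plus-memcpy pair into kmemdup(), which allocates and copies in one step and makes the old "avoid powerplay crash when hardcode pptable is empty" comment redundant. A userspace model of the pattern, for reference (memdup() here is a stand-in, not a kernel API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the kernel's kmemdup(src, len, gfp):
     * allocate len bytes and copy src into them, or return NULL. */
    static void *memdup(const void *src, size_t len)
    {
            void *dst = malloc(len);

            if (dst)
                    memcpy(dst, src, len);
            return dst;
    }

    int main(void)
    {
            const char table[] = "soft pp table";
            char *copy = memdup(table, sizeof(table));

            if (!copy)
                    return 1;  /* mirrors the -ENOMEM path */
            puts(copy);
            free(copy);
            return 0;
    }
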
index a46225c..1d1875a 100644 (file)
@@ -70,11 +70,12 @@ int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
        int i;
 
        table_entries = hwmgr->num_ps;
+
        state = hwmgr->ps;
 
        for (i = 0; i < table_entries; i++) {
                if (state->id == *state_id) {
-                       hwmgr->request_ps = state;
+                       memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
                        return 0;
                }
                state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
@@ -106,7 +107,7 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
        if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
                phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
                phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
-               hwmgr->current_ps = requested;
+               memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
        }
        return 0;
 }
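The two hunks above are the heart of this change: request_ps and current_ps stop being pointers aliased into the hwmgr->ps table and become owned snapshots (the dedicated buffers are allocated in the hw_init_power_state_table hunk further down and freed in hwmgr_fini), so a later rewrite of the table can no longer mutate the state the manager believes is current. Schematically (a sketch, not the patch itself):

    /* Before: aliasing -- both names point at an entry inside hwmgr->ps[],
     * so updating the table changes "current" behind the manager's back. */
    hwmgr->current_ps = requested;

    /* After: value copy -- current_ps is a dedicated buffer holding
     * a stable snapshot of ps_size bytes. */
    memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
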
index abbcbc9..6e359c9 100644 (file)
@@ -5,7 +5,7 @@
 HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
               hardwaremanager.o pp_acpi.o cz_hwmgr.o \
                cz_clockpowergating.o tonga_powertune.o\
-              tonga_processpptables.o ppatomctrl.o \
+              process_pptables_v1_0.o ppatomctrl.o \
                tonga_hwmgr.o pppcielanes.o  tonga_thermal.o\
                fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
                fiji_clockpowergating.o fiji_thermal.o \
index 9368e21..74300d6 100644 (file)
@@ -44,8 +44,8 @@
 #include "dce/dce_10_0_sh_mask.h"
 #include "pppcielanes.h"
 #include "fiji_hwmgr.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
+#include "process_pptables_v1_0.h"
+#include "pptable_v1_0.h"
 #include "pp_debug.h"
 #include "pp_acpi.h"
 #include "amd_pcie_helpers.h"
@@ -112,7 +112,7 @@ static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
 
 static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
 
-struct fiji_power_state *cast_phw_fiji_power_state(
+static struct fiji_power_state *cast_phw_fiji_power_state(
                                  struct pp_hw_power_state *hw_ps)
 {
        PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
@@ -122,7 +122,8 @@ struct fiji_power_state *cast_phw_fiji_power_state(
        return (struct fiji_power_state *)hw_ps;
 }
 
-const struct fiji_power_state *cast_const_phw_fiji_power_state(
+static const struct
+fiji_power_state *cast_const_phw_fiji_power_state(
                                 const struct pp_hw_power_state *hw_ps)
 {
        PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
@@ -1626,7 +1627,7 @@ static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
  * @param voltage - voltage to look for
  * @return 0 on success
  */
-uint8_t fiji_get_voltage_index(
+static uint8_t fiji_get_voltage_index(
                struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
 {
        uint8_t count = (uint8_t) (lookup_table->count);
@@ -1690,7 +1691,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
 * @return   always  0
 */
 
-int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
                struct SMU73_Discrete_DpmTable *table)
 {
        int result;
@@ -2301,7 +2302,7 @@ static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
 * @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
 * @param    voltage     the SMC VOLTAGE structure to be populated
 */
-int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
                uint32_t mclk, SMIO_Pattern *smio_pat)
 {
        const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -4005,7 +4006,7 @@ static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 
        ps = (struct fiji_power_state *)(&state->hardware);
 
-       result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
+       result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
                        fiji_get_pp_table_entry_callback_func);
 
        /* This is the earliest time we have all the dependency table and the VBIOS boot state
@@ -4622,7 +4623,7 @@ static int fiji_generate_dpm_level_enable_mask(
        return 0;
 }
 
-int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
                                  (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
@@ -4636,14 +4637,14 @@ int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
                        PPSMC_MSG_VCEDPM_Disable);
 }
 
-int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr->smumgr, enable?
                        PPSMC_MSG_SAMUDPM_Enable :
                        PPSMC_MSG_SAMUDPM_Disable);
 }
 
-int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr->smumgr, enable?
                        PPSMC_MSG_ACPDPM_Enable :
@@ -4880,7 +4881,7 @@ static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
        return;
 }
 
-int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+static int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
 {
        int result;
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -5156,7 +5157,7 @@ static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
        return fiji_program_display_gap(hwmgr);
 }
@@ -5187,7 +5188,7 @@ static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
                        PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
 }
 
-int fiji_dpm_set_interrupt_state(void *private_data,
+static int fiji_dpm_set_interrupt_state(void *private_data,
                                         unsigned src_id, unsigned type,
                                         int enabled)
 {
@@ -5235,7 +5236,7 @@ int fiji_dpm_set_interrupt_state(void *private_data,
        return 0;
 }
 
-int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
                                        const void *thermal_interrupt_info)
 {
        int result;
@@ -5405,7 +5406,10 @@ static inline bool fiji_are_power_levels_equal(const struct fiji_performance_lev
                  (pl1->pcie_lane == pl2->pcie_lane));
 }
 
-int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int
+fiji_check_states_equal(struct pp_hwmgr *hwmgr,
+               const struct pp_hw_power_state *pstate1,
+               const struct pp_hw_power_state *pstate2, bool *equal)
 {
        const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
        const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
@@ -5437,7 +5441,8 @@ int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta
        return 0;
 }
 
-bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool
+fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
        bool is_update_required = false;
@@ -5547,7 +5552,7 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
        .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
        .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
        .force_dpm_level = &fiji_dpm_force_dpm_level,
-       .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
+       .get_num_of_pp_table_entries = &get_number_of_powerplay_table_entries_v1_0,
        .get_power_state_size = &fiji_get_power_state_size,
        .get_pp_table_entry = &fiji_get_pp_table_entry,
        .patch_boot_state = &fiji_patch_boot_state,
@@ -5589,7 +5594,7 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
 int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
        hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
-       hwmgr->pptable_func = &tonga_pptable_funcs;
+       hwmgr->pptable_func = &pptable_v1_0_funcs;
        pp_fiji_thermal_initialize(hwmgr);
        return 0;
 }
index 92976b6..7f431e7 100644 (file)
@@ -152,7 +152,7 @@ int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+static int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
        int result;
 
@@ -421,7 +421,7 @@ int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+static int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
                void *input, void *output, void *storage, int result)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -533,7 +533,7 @@ int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+static int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
                void *input, void *output, void *storage, int result)
 {
 /* If the fantable setup has failed we could have disabled
index 789f98a..14f8c1f 100644 (file)
@@ -24,8 +24,6 @@
 #include "hwmgr.h"
 #include "hardwaremanager.h"
 #include "power_state.h"
-#include "pp_acpi.h"
-#include "amd_acpi.h"
 #include "pp_debug.h"
 
 #define PHM_FUNC_CHECK(hw) \
                        return -EINVAL;                         \
        } while (0)
 
-void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
-{
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
-
-       if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
-               acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-}
-
 bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
 {
        return hwmgr->block_hw_access;
index d829076..524d0dd 100644 (file)
@@ -32,8 +32,8 @@
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "ppsmc.h"
-
-#define VOLTAGE_SCALE               4
+#include "pp_acpi.h"
+#include "amd_acpi.h"
 
 extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
@@ -41,23 +41,12 @@ extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
 extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
 
-static int hwmgr_set_features_platform_caps(struct pp_hwmgr *hwmgr)
-{
-       if (amdgpu_sclk_deep_sleep_en)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep);
-       else
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SclkDeepSleep);
-
-       if (amdgpu_powercontainment)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                           PHM_PlatformCaps_PowerContainment);
-       else
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                           PHM_PlatformCaps_PowerContainment);
+static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
+static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
 
-       return 0;
+uint8_t convert_to_vid(uint16_t vddc)
+{
+       return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
 }
 
 int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
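As a sanity check on the VID formula just moved into hwmgr.c: assuming VOLTAGE_SCALE is 4 (the value of the define this same diff drops from the top of the file), a 1.15 V request encodes as VID 64. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define VOLTAGE_SCALE 4  /* assumed, matching the removed define */

    /* mV-to-VID encoding expected by the SMU firmware. */
    static uint8_t convert_to_vid(uint16_t vddc)
    {
            return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
    }

    int main(void)
    {
            printf("%u\n", convert_to_vid(1150));  /* (6200 - 4600) / 25 = 64 */
            return 0;
    }
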
@@ -76,13 +65,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        hwmgr->device = pp_init->device;
        hwmgr->chip_family = pp_init->chip_family;
        hwmgr->chip_id = pp_init->chip_id;
-       hwmgr->hw_revision = pp_init->rev_id;
-       hwmgr->sub_sys_id = pp_init->sub_sys_id;
-       hwmgr->sub_vendor_id = pp_init->sub_vendor_id;
        hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
        hwmgr->power_source = PP_PowerSource_AC;
+       hwmgr->pp_table_version = PP_TABLE_V1;
 
-       hwmgr_set_features_platform_caps(hwmgr);
+       hwmgr_init_default_caps(hwmgr);
+       hwmgr_set_user_specify_caps(hwmgr);
 
        switch (hwmgr->chip_family) {
        case AMDGPU_FAMILY_CZ:
@@ -111,8 +99,6 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
                return -EINVAL;
        }
 
-       phm_init_dynamic_caps(hwmgr);
-
        return 0;
 }
 
@@ -131,6 +117,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
        kfree(hwmgr->set_temperature_range.function_list);
 
        kfree(hwmgr->ps);
+       kfree(hwmgr->current_ps);
+       kfree(hwmgr->request_ps);
        kfree(hwmgr);
        return 0;
 }
@@ -155,10 +143,17 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
                                          sizeof(struct pp_power_state);
 
        hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
-
        if (hwmgr->ps == NULL)
                return -ENOMEM;
 
+       hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+       if (hwmgr->request_ps == NULL)
+               return -ENOMEM;
+
+       hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+       if (hwmgr->current_ps == NULL)
+               return -ENOMEM;
+
        state = hwmgr->ps;
 
        for (i = 0; i < table_entries; i++) {
@@ -166,7 +161,8 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
 
                if (state->classification.flags & PP_StateClassificationFlag_Boot) {
                        hwmgr->boot_ps = state;
-                       hwmgr->current_ps = hwmgr->request_ps = state;
+                       memcpy(hwmgr->current_ps, state, size);
+                       memcpy(hwmgr->request_ps, state, size);
                }
 
                state->id = i + 1; /* assigned unique num for every power state id */
@@ -176,6 +172,7 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
                state = (struct pp_power_state *)((unsigned long)state + size);
        }
 
+
        return 0;
 }
 
@@ -209,8 +206,6 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
 }
 
 
-
-
 /**
  * Returns once the part of the register indicated by the mask has
  * reached the given value.The indirect space is described by giving
@@ -452,6 +447,27 @@ uint8_t phm_get_voltage_index(
        return i - 1;
 }
 
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+               uint32_t voltage)
+{
+       uint8_t count = (uint8_t) (voltage_table->count);
+       uint8_t i = 0;
+
+       PP_ASSERT_WITH_CODE((NULL != voltage_table),
+               "Voltage Table empty.", return 0;);
+       PP_ASSERT_WITH_CODE((0 != count),
+               "Voltage Table empty.", return 0;);
+
+       for (i = 0; i < count; i++) {
+               /* find first voltage bigger than requested */
+               if (voltage_table->entries[i].value >= voltage)
+                       return i;
+       }
+
+       /* voltage is bigger than max voltage in the table */
+       return i - 1;
+}
+
 uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
 {
        uint32_t  i;
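phm_get_voltage_id() above is a first-fit scan: it returns the index of the first table entry at or above the requested voltage and clamps to the last entry when the request exceeds the table maximum (duplicating the phm_get_voltage_index() logic directly above it in the file). A standalone sketch of the same behavior with a simplified table layout:

    #include <stdint.h>
    #include <stdio.h>

    struct vt_entry { uint32_t value; };
    struct voltage_table { uint8_t count; struct vt_entry entries[8]; };

    /* First entry >= voltage wins; out-of-range requests clamp to the last. */
    static uint8_t get_voltage_id(const struct voltage_table *t, uint32_t voltage)
    {
            uint8_t i;

            for (i = 0; i < t->count; i++)
                    if (t->entries[i].value >= voltage)
                            return i;
            return i - 1;
    }

    int main(void)
    {
            struct voltage_table t = { 3, { {800}, {900}, {1100} } };

            printf("%u %u\n", get_voltage_id(&t, 850),
                              get_voltage_id(&t, 1200));  /* prints "1 2" */
            return 0;
    }
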
@@ -539,7 +555,8 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
                table_clk_vlt->entries[2].v = 810;
                table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
                table_clk_vlt->entries[3].v = 900;
-               pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+               if (pptable_info != NULL)
+                       pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
                hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
        }
 
@@ -605,3 +622,94 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
 	printk(KERN_ERR "DAL requested level can not"
 			" find an available voltage in VDDC DPM Table\n");
 }
+
+void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
+{
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
+
+       phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
+       if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
+               acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_DynamicPatchPowerState);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_DynamicPowerManagement);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_SMC);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_DynamicUVDState);
+
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+       return;
+}
+
+int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
+{
+       if (amdgpu_sclk_deep_sleep_en)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep);
+
+       if (amdgpu_powercontainment)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+
+       hwmgr->feature_mask = amdgpu_pp_feature_mask;
+
+       return 0;
+}
+
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+                               uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+       uint32_t vol;
+       int ret = 0;
+
+       if (hwmgr->chip_id < CHIP_POLARIS10) {
+               atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+               if (*voltage >= 2000 || *voltage == 0)
+                       *voltage = 1150;
+       } else {
+               ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+               *voltage = (uint16_t)vol/100;
+       }
+       return ret;
+}
+
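One caveat worth flagging in the hunk above: in `*voltage = (uint16_t)vol/100;` the cast binds tighter than the division, so vol is truncated to 16 bits before dividing. The _ai variant appears to report in 0.01 mV units (hence the divide by 100), so readings above 65535 -- about 0.65 V -- truncate the wrong way; `(uint16_t)(vol / 100)` is presumably what was intended. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t vol = 115000;  /* 1.15 V expressed in 0.01 mV units */

            printf("%u\n", (uint16_t)vol / 100);   /* 494  -- truncated first */
            printf("%u\n", (uint16_t)(vol / 100)); /* 1150 -- intended mV */
            return 0;
    }
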
index 8a7ada5..5abe433 100644 (file)
@@ -781,7 +781,7 @@ static int iceland_upload_firmware(struct pp_hwmgr *hwmgr)
  * @param    hwmgr  the address of the powerplay hardware manager.
  * @return   always 0
  */
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
 {
        iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
 
@@ -1355,14 +1355,6 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
-/**
- * Convert a voltage value in mv unit to VID number required by SMU firmware
- */
-static uint8_t convert_to_vid(uint16_t vddc)
-{
-       return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
 int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
 {
        int i;
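The duplicate convert_to_vid() helper is dropped from this file (and again from the Polaris10 code further down), presumably in favour of a single shared copy. For reference, the formula VID = (6200 - vddc * VOLTAGE_SCALE) / 25, assuming VOLTAGE_SCALE is 4 (voltages carried in 0.25 mV steps), maps vddc = 1150 mV to (6200 - 4600) / 25 = VID 64.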
@@ -2606,7 +2598,7 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
  * @param    pInput  the pointer to input data (PowerState)
  * @return   always 0
  */
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
 {
        int result;
        iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
@@ -4629,7 +4621,7 @@ static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
        return 0;
 }
 
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
 {
        iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
 
index b5edb51..7e405b0 100644
@@ -31,7 +31,7 @@ int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_uvd_power_gating(hwmgr)) {
                if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -47,7 +47,7 @@ int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_vce_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -55,7 +55,7 @@ int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_vce_power_gating(hwmgr))
                return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -63,7 +63,7 @@ int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SamuPowerGating))
@@ -72,7 +72,7 @@ int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
+static int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SamuPowerGating))
index b691322..191ed50 100644
 #include "pp_debug.h"
 #include "ppatomctrl.h"
 #include "atombios.h"
-#include "tonga_pptable.h"
+#include "pptable_v1_0.h"
 #include "pppcielanes.h"
 #include "amd_pcie_helpers.h"
 #include "hardwaremanager.h"
-#include "tonga_processpptables.h"
+#include "process_pptables_v1_0.h"
 #include "cgs_common.h"
 #include "smu74.h"
 #include "smu_ucode_xfer_vi.h"
@@ -108,7 +108,7 @@ enum DPM_EVENT_SRC {
 
 static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
 
-struct polaris10_power_state *cast_phw_polaris10_power_state(
+static struct polaris10_power_state *cast_phw_polaris10_power_state(
                                  struct pp_hw_power_state *hw_ps)
 {
        PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
@@ -118,7 +118,8 @@ struct polaris10_power_state *cast_phw_polaris10_power_state(
        return (struct polaris10_power_state *)hw_ps;
 }
 
-const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
+static const struct polaris10_power_state *
+cast_const_phw_polaris10_power_state(
                                 const struct pp_hw_power_state *hw_ps)
 {
        PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
@@ -141,7 +142,7 @@ static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
  * @param    hwmgr  the address of the powerplay hardware manager.
  * @return   always 0
  */
-int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+static int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
 {
        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
 
@@ -150,7 +151,7 @@ int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+static uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
 {
        uint32_t speedCntl = 0;
 
@@ -161,7 +162,7 @@ uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
                        PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
 }
 
-int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+static int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 {
        uint32_t link_width;
 
@@ -181,7 +182,7 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
 * @param    pHwMgr  the address of the powerplay hardware manager.
 * @return   always PP_Result_OK
 */
-int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+static int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
 {
        PP_ASSERT_WITH_CODE(
                (hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
@@ -661,7 +662,7 @@ static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
  * on the power policy or external client requests,
  * such as UVD request, etc.
  */
-int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+static int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
@@ -735,11 +736,6 @@ int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-uint8_t convert_to_vid(uint16_t vddc)
-{
-       return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
 /**
  * Mvdd table preparation for SMC.
  *
@@ -840,7 +836,7 @@ static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
 * @return   always  0
 */
 
-int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
                struct SMU74_Discrete_DpmTable *table)
 {
        polaris10_populate_smc_vddci_table(hwmgr, table);
@@ -1417,7 +1413,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
 * @param    mclk        the MCLK value to be used in the decision if MVDD should be high or low.
 * @param    voltage     the SMC VOLTAGE structure to be populated
 */
-int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
                uint32_t mclk, SMIO_Pattern *smio_pat)
 {
        const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -1931,7 +1927,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
 }
 
 
-int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
@@ -2560,7 +2556,7 @@ static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
        return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
-int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
+static int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        data->pcie_performance_request = true;
@@ -2568,7 +2564,7 @@ int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+static int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
        int tmp_result, result = 0;
        tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
@@ -2749,12 +2745,12 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+static int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
        return phm_hwmgr_backend_fini(hwmgr);
 }
 
-int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+static int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
@@ -3109,7 +3105,7 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+static int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
 {
        struct phm_ppt_v1_information *table_info =
                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -3118,11 +3114,27 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
        struct phm_ppt_v1_voltage_lookup_table *lookup_table =
                        table_info->vddc_lookup_table;
        uint32_t i;
+       uint32_t hw_revision, sub_vendor_id, sub_sys_id;
+       struct cgs_system_info sys_info = {0};
+
+       sys_info.size = sizeof(struct cgs_system_info);
 
-       if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7 &&
-                       ((hwmgr->sub_sys_id == 0xb37 && hwmgr->sub_vendor_id == 0x1002) ||
-                   (hwmgr->sub_sys_id == 0x4a8 && hwmgr->sub_vendor_id == 0x1043) ||
-                   (hwmgr->sub_sys_id == 0x9480 && hwmgr->sub_vendor_id == 0x1682))) {
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       hw_revision = (uint32_t)sys_info.value;
+
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       sub_sys_id = (uint32_t)sys_info.value;
+
+       sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
+       cgs_query_system_info(hwmgr->device, &sys_info);
+       sub_vendor_id = (uint32_t)sys_info.value;
+
+       if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
+                       ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
+                   (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
+                   (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
                if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
                        return 0;
 
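The three consecutive CGS queries above reuse one cgs_system_info struct, changing only info_id between calls. A hypothetical wrapper makes the pattern explicit (the helper name is illustrative, not part of the patch):

	static uint32_t query_sys_info_u32(void *device, uint32_t info_id)
	{
		struct cgs_system_info sys_info = {0};

		/* size must be filled in before every query */
		sys_info.size = sizeof(struct cgs_system_info);
		sys_info.info_id = info_id;
		cgs_query_system_info(device, &sys_info);
		return (uint32_t)sys_info.value;
	}

With it, hw_revision would simply be query_sys_info_u32(hwmgr->device, CGS_SYSTEM_INFO_PCIE_REV).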
@@ -3137,7 +3149,7 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
 }
 
 
-int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+static int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data;
        struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
@@ -3880,7 +3892,7 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 
        ps = (struct polaris10_power_state *)(&state->hardware);
 
-       result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
+       result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
                        polaris10_get_pp_table_entry_callback_func);
 
        /* This is the earliest time we have all the dependency table and the VBIOS boot state
@@ -4347,7 +4359,8 @@ static int polaris10_generate_dpm_level_enable_mask(
        return 0;
 }
 
-int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int
+polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
                        PPSMC_MSG_UVDDPM_Enable :
@@ -4361,7 +4374,8 @@ int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
                        PPSMC_MSG_VCEDPM_Disable);
 }
 
-int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+static int
+polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
 {
        return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
                        PPSMC_MSG_SAMUDPM_Enable :
@@ -4675,14 +4689,16 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_
 }
 
 
-int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+static int
+polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
        PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
 
        return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
 }
 
-int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+static int
+polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
 {
        uint32_t num_active_displays = 0;
        struct cgs_display_info info = {0};
@@ -4705,7 +4721,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always OK
 */
-int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
+static int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint32_t num_active_displays = 0;
@@ -4750,7 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
 }
 
 
-int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+static int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
        return polaris10_program_display_gap(hwmgr);
 }
@@ -4774,13 +4790,15 @@ static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_
                        PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
 }
 
-int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+static int
+polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
                                        const void *thermal_interrupt_info)
 {
        return 0;
 }
 
-bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+static bool polaris10_check_smc_update_required_for_display_configuration(
+               struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        bool is_update_required = false;
@@ -4810,7 +4828,9 @@ static inline bool polaris10_are_power_levels_equal(const struct polaris10_perfo
                  (pl1->pcie_lane == pl2->pcie_lane));
 }
 
-int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+static int polaris10_check_states_equal(struct pp_hwmgr *hwmgr,
+               const struct pp_hw_power_state *pstate1,
+               const struct pp_hw_power_state *pstate2, bool *equal)
 {
        const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
        const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
@@ -4841,7 +4861,7 @@ int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_powe
        return 0;
 }
 
-int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
@@ -4954,7 +4974,7 @@ static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
+static int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
        int tmp_result, result = 0;
 
@@ -5225,7 +5245,7 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
        .get_sclk = polaris10_dpm_get_sclk,
        .patch_boot_state = polaris10_dpm_patch_boot_state,
        .get_pp_table_entry = polaris10_get_pp_table_entry,
-       .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
+       .get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
        .print_current_perforce_level = polaris10_print_current_perforce_level,
        .powerdown_uvd = polaris10_phm_powerdown_uvd,
        .powergate_uvd = polaris10_phm_powergate_uvd,
@@ -5262,7 +5282,7 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
 int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
        hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
-       hwmgr->pptable_func = &tonga_pptable_funcs;
+       hwmgr->pptable_func = &pptable_v1_0_funcs;
        pp_polaris10_thermal_initialize(hwmgr);
 
        return 0;
index 33c3394..378ab34 100644
@@ -30,6 +30,7 @@
 #include "ppatomctrl.h"
 #include "polaris10_ppsmc.h"
 #include "polaris10_powertune.h"
+#include "polaris10_smumgr.h"
 
 #define POLARIS10_MAX_HARDWARE_POWERLEVELS     2
 
@@ -165,10 +166,6 @@ struct polaris10_pcie_perf_range {
        uint16_t  max;
        uint16_t  min;
 };
-struct polaris10_range_table {
-       uint32_t trans_lower_frequency; /* in 10khz */
-       uint32_t trans_upper_frequency;
-};
 
 struct polaris10_hwmgr {
        struct polaris10_dpm_table                      dpm_table;
index bc78e28..329119d 100644
@@ -66,19 +66,6 @@ struct polaris10_pt_config_reg {
        enum polaris10_pt_config_reg_type       type;
 };
 
-struct polaris10_pt_defaults {
-       uint8_t   SviLoadLineEn;
-       uint8_t   SviLoadLineVddC;
-       uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
-       uint8_t   TDC_MAWt;
-       uint8_t   TdcWaterfallCtl;
-       uint8_t   DTEAmbientTempBase;
-
-       uint32_t  DisplayCac;
-       uint32_t  BAPM_TEMP_GRADIENT;
-       uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-       uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-};
 
 void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
 int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
index b206632..41f835a 100644
@@ -152,7 +152,7 @@ int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+static int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
 {
        int result;
 
@@ -425,7 +425,7 @@ int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+static int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
                void *input, void *output, void *storage, int result)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -537,7 +537,7 @@ int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
 * @param    Result the last failure code
 * @return   result from set temperature range routine
 */
-int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+static int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
                void *input, void *output, void *storage, int result)
 {
 /* If the fantable setup has failed we could have disabled
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
new file mode 100644
index 0000000..1e870f5
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef PPTABLE_V1_0_H
+#define PPTABLE_V1_0_H
+
+/** \file
+ * This is a PowerPlay table header file
+ */
+#pragma pack(push, 1)
+
+#include "hwmgr.h"
+
+#define ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_TONGA_PP_FANPARAMETERS_NOFAN                                 0x80    /* No fan is connected to this controller. */
+
+#define ATOM_TONGA_PP_THERMALCONTROLLER_NONE      0
+#define ATOM_TONGA_PP_THERMALCONTROLLER_LM96163   17
+#define ATOM_TONGA_PP_THERMALCONTROLLER_TONGA     21
+#define ATOM_TONGA_PP_THERMALCONTROLLER_FIJI      22
+
+/*
+ * Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+ * We probably should reserve the bit 0x80 for this use.
+ * To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+ * The driver can pick the correct internal controller based on the ASIC.
+ */
+
+#define ATOM_TONGA_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    /* ADT7473 Fan Control + Internal Thermal Controller */
+#define ATOM_TONGA_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    /* EMC2103 Fan Control + Internal Thermal Controller */
+
+/* ATOM_TONGA_POWERPLAYTABLE::ulPlatformCaps */
+#define ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL              0x1            /* This cap indicates whether vddgfx will be a separated power rail. */
+#define ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY                   0x2            /* This cap indicates whether this is a mobile part and CCC need to show Powerplay page. */
+#define ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE            0x4            /* This cap indicates whether power source notification is done by SBIOS directly. */
+#define ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND      0x8            /* Enable the option to overwrite voltage island feature to be disabled, regardless of VddGfx power rail support. */
+#define ____RETIRE16____                                0x10
+#define ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC                 0x20            /* This cap indicates whether power source notification is done by GPIO directly. */
+#define ____RETIRE64____                                0x40
+#define ____RETIRE128____                               0x80
+#define ____RETIRE256____                              0x100
+#define ____RETIRE512____                              0x200
+#define ____RETIRE1024____                             0x400
+#define ____RETIRE2048____                             0x800
+#define ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL             0x1000            /* This cap indicates dynamic MVDD is required. Uncheck to disable it. */
+#define ____RETIRE2000____                            0x2000
+#define ____RETIRE4000____                            0x4000
+#define ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL            0x8000            /* This cap indicates dynamic VDDCI is required. Uncheck to disable it. */
+#define ____RETIRE10000____                          0x10000
+#define ATOM_TONGA_PP_PLATFORM_CAP_BACO                    0x20000            /* Enable to indicate the driver supports BACO state. */
+
+#define ATOM_TONGA_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17         0x100000     /* Enable to indicate the driver supports thermal2GPIO17. */
+#define ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL  0x1000000     /* Enable to indicate if thermal and PCC are sharing the same GPIO */
+#define ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE       0x2000000
+
+/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK               0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT              0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE               0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY            1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED           3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE        5
+/* 2, 4, 6, 7 are reserved */
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT                  0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL               0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE    0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST                  0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED                0x0080
+#define ATOM_PPLIB_CLASSIFICATION_ACPI                  0x1000
+
+/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+
+#define ATOM_Tonga_DISALLOW_ON_DC                       0x00004000
+#define ATOM_Tonga_ENABLE_VARIBRIGHT                    0x00008000
+
+#define ATOM_Tonga_TABLE_REVISION_TONGA                 7
+
+typedef struct _ATOM_Tonga_POWERPLAYTABLE {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+
+       UCHAR  ucTableRevision;
+       USHORT usTableSize;                                             /*the size of header structure */
+
+       ULONG   ulGoldenPPID;
+       ULONG   ulGoldenRevision;
+       USHORT  usFormatID;
+
+       USHORT  usVoltageTime;                                   /*in microseconds */
+       ULONG   ulPlatformCaps;                                   /*See ATOM_Tonga_CAPS_* */
+
+       ULONG   ulMaxODEngineClock;                        /*For Overdrive.  */
+       ULONG   ulMaxODMemoryClock;                        /*For Overdrive. */
+
+       USHORT  usPowerControlLimit;
+       USHORT  usUlvVoltageOffset;                               /*in mv units */
+
+       USHORT  usStateArrayOffset;                               /*points to ATOM_Tonga_State_Array */
+       USHORT  usFanTableOffset;                                 /*points to ATOM_Tonga_Fan_Table */
+       USHORT  usThermalControllerOffset;                 /*points to ATOM_Tonga_Thermal_Controller */
+       USHORT  usReserv;                                                  /*CustomThermalPolicy removed for Tonga. Keep this field as reserved. */
+
+       USHORT  usMclkDependencyTableOffset;       /*points to ATOM_Tonga_MCLK_Dependency_Table */
+       USHORT  usSclkDependencyTableOffset;       /*points to ATOM_Tonga_SCLK_Dependency_Table */
+       USHORT  usVddcLookupTableOffset;                   /*points to ATOM_Tonga_Voltage_Lookup_Table */
+       USHORT  usVddgfxLookupTableOffset;              /*points to ATOM_Tonga_Voltage_Lookup_Table */
+
+       USHORT  usMMDependencyTableOffset;                /*points to ATOM_Tonga_MM_Dependency_Table */
+
+       USHORT  usVCEStateTableOffset;                     /*points to ATOM_Tonga_VCE_State_Table; */
+
+       USHORT  usPPMTableOffset;                                 /*points to ATOM_Tonga_PPM_Table */
+       USHORT  usPowerTuneTableOffset;                   /*points to ATOM_PowerTune_Table */
+
+       USHORT  usHardLimitTableOffset;                    /*points to ATOM_Tonga_Hard_Limit_Table */
+
+       USHORT  usPCIETableOffset;                                /*points to ATOM_Tonga_PCIE_Table */
+
+       USHORT  usGPIOTableOffset;                                /*points to ATOM_Tonga_GPIO_Table */
+
+       USHORT  usReserved[6];                                     /*TODO: modify reserved size to fit structure aligning */
+} ATOM_Tonga_POWERPLAYTABLE;
+
+typedef struct _ATOM_Tonga_State {
+       UCHAR  ucEngineClockIndexHigh;
+       UCHAR  ucEngineClockIndexLow;
+
+       UCHAR  ucMemoryClockIndexHigh;
+       UCHAR  ucMemoryClockIndexLow;
+
+       UCHAR  ucPCIEGenLow;
+       UCHAR  ucPCIEGenHigh;
+
+       UCHAR  ucPCIELaneLow;
+       UCHAR  ucPCIELaneHigh;
+
+       USHORT usClassification;
+       ULONG ulCapsAndSettings;
+       USHORT usClassification2;
+       UCHAR  ucUnused[4];
+} ATOM_Tonga_State;
+
+typedef struct _ATOM_Tonga_State_Array {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;             /* Number of entries. */
+       ATOM_Tonga_State entries[1];    /* Dynamically allocate entries. */
+} ATOM_Tonga_State_Array;
+
+typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
+       UCHAR  ucVddcInd;       /* Vddc voltage */
+       USHORT usVddci;
+       USHORT usVddgfxOffset;  /* Offset relative to Vddc voltage */
+       USHORT usMvdd;
+       ULONG ulMclk;
+       USHORT usReserved;
+} ATOM_Tonga_MCLK_Dependency_Record;
+
+typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_MCLK_Dependency_Record entries[1];                           /* Dynamically allocate entries. */
+} ATOM_Tonga_MCLK_Dependency_Table;
+
+typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
+       UCHAR  ucVddInd;                                                                                        /* Base voltage */
+       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
+       ULONG ulSclk;
+       USHORT usEdcCurrent;
+       UCHAR  ucReliabilityTemperature;
+       UCHAR  ucCKSVOffsetandDisable;                                                    /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+} ATOM_Tonga_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_SCLK_Dependency_Record entries[1];                            /* Dynamically allocate entries. */
+} ATOM_Tonga_SCLK_Dependency_Table;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+       UCHAR  ucVddInd;                                                                                        /* Base voltage */
+       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
+       ULONG ulSclk;
+       USHORT usEdcCurrent;
+       UCHAR  ucReliabilityTemperature;
+       UCHAR  ucCKSVOffsetandDisable;                  /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+       ULONG  ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                     /* Number of entries. */
+       ATOM_Polaris_SCLK_Dependency_Record entries[1];                          /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
+typedef struct _ATOM_Tonga_PCIE_Record {
+       UCHAR ucPCIEGenSpeed;
+       UCHAR usPCIELaneWidth;
+       UCHAR ucReserved[2];
+} ATOM_Tonga_PCIE_Record;
+
+typedef struct _ATOM_Tonga_PCIE_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_PCIE_Record entries[1];                                                      /* Dynamically allocate entries. */
+} ATOM_Tonga_PCIE_Table;
+
+typedef struct _ATOM_Polaris10_PCIE_Record {
+       UCHAR ucPCIEGenSpeed;
+       UCHAR usPCIELaneWidth;
+       UCHAR ucReserved[2];
+       ULONG ulPCIE_Sclk;
+} ATOM_Polaris10_PCIE_Record;
+
+typedef struct _ATOM_Polaris10_PCIE_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                         /* Number of entries. */
+       ATOM_Polaris10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
+} ATOM_Polaris10_PCIE_Table;
+
+
+typedef struct _ATOM_Tonga_MM_Dependency_Record {
+       UCHAR   ucVddcInd;                                                                                       /* VDDC voltage */
+       USHORT  usVddgfxOffset;                                                                   /* Offset relative to VDDC voltage */
+       ULONG  ulDClk;                                                                                          /* UVD D-clock */
+       ULONG  ulVClk;                                                                                          /* UVD V-clock */
+       ULONG  ulEClk;                                                                                          /* VCE clock */
+       ULONG  ulAClk;                                                                                          /* ACP clock */
+       ULONG  ulSAMUClk;                                                                                       /* SAMU clock */
+} ATOM_Tonga_MM_Dependency_Record;
+
+typedef struct _ATOM_Tonga_MM_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_MM_Dependency_Record entries[1];                        /* Dynamically allocate entries. */
+} ATOM_Tonga_MM_Dependency_Table;
+
+typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
+       USHORT usVdd;                                                                                      /* Base voltage */
+       USHORT usCACLow;
+       USHORT usCACMid;
+       USHORT usCACHigh;
+} ATOM_Tonga_Voltage_Lookup_Record;
+
+typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                                             /* Number of entries. */
+       ATOM_Tonga_Voltage_Lookup_Record entries[1];                            /* Dynamically allocate entries. */
+} ATOM_Tonga_Voltage_Lookup_Table;
+
+typedef struct _ATOM_Tonga_Fan_Table {
+       UCHAR   ucRevId;                                                 /* Change this if the table format changes or version changes so that the other fields are not the same. */
+       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
+       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM. */
+       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
+       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
+       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
+       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
+       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
+       USHORT  usTMax;                                                  /* The max temperature */
+       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
+       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
+       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
+       USHORT  usFanRPMMax;                                      /* The default value in RPM */
+       ULONG  ulMinFanSCLKAcousticLimit;          /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
+       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
+       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
+       USHORT  usReserved;
+} ATOM_Tonga_Fan_Table;
+
+typedef struct _ATOM_Fiji_Fan_Table {
+       UCHAR   ucRevId;                                                 /* Change this if the table format changes or version changes so that the other fields are not the same. */
+       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
+       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM. */
+       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
+       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
+       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
+       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
+       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
+       USHORT  usTMax;                                                  /* The max temperature */
+       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
+       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
+       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
+       USHORT  usFanRPMMax;                                      /* The default value in RPM */
+       ULONG  ulMinFanSCLKAcousticLimit;               /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
+       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
+       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
+       USHORT  usFanGainEdge;
+       USHORT  usFanGainHotspot;
+       USHORT  usFanGainLiquid;
+       USHORT  usFanGainVrVddc;
+       USHORT  usFanGainVrMvdd;
+       USHORT  usFanGainPlx;
+       USHORT  usFanGainHbm;
+       USHORT  usReserved;
+} ATOM_Fiji_Fan_Table;
+
+typedef struct _ATOM_Tonga_Thermal_Controller {
+       UCHAR ucRevId;
+       UCHAR ucType;              /* one of ATOM_TONGA_PP_THERMALCONTROLLER_* */
+       UCHAR ucI2cLine;                /* as interpreted by DAL I2C */
+       UCHAR ucI2cAddress;
+       UCHAR ucFanParameters;  /* Fan Control Parameters. */
+       UCHAR ucFanMinRPM;       /* Fan Minimum RPM (hundreds) -- for display purposes only. */
+       UCHAR ucFanMaxRPM;       /* Fan Maximum RPM (hundreds) -- for display purposes only. */
+       UCHAR ucReserved;
+       UCHAR ucFlags;             /* to be defined */
+} ATOM_Tonga_Thermal_Controller;
+
+typedef struct _ATOM_Tonga_VCE_State_Record {
+       UCHAR  ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Tonga_MM_Dependency_Table' type */
+       UCHAR  ucFlag;          /* 2 bits indicates memory p-states */
+       UCHAR  ucSCLKIndex;             /*index into ATOM_Tonga_SCLK_Dependency_Table */
+       UCHAR  ucMCLKIndex;             /*index into ATOM_Tonga_MCLK_Dependency_Table */
+} ATOM_Tonga_VCE_State_Record;
+
+typedef struct _ATOM_Tonga_VCE_State_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;
+       ATOM_Tonga_VCE_State_Record entries[1];
+} ATOM_Tonga_VCE_State_Table;
+
+typedef struct _ATOM_Tonga_PowerTune_Table {
+       UCHAR  ucRevId;
+       USHORT usTDP;
+       USHORT usConfigurableTDP;
+       USHORT usTDC;
+       USHORT usBatteryPowerLimit;
+       USHORT usSmallPowerLimit;
+       USHORT usLowCACLeakage;
+       USHORT usHighCACLeakage;
+       USHORT usMaximumPowerDeliveryLimit;
+       USHORT usTjMax;
+       USHORT usPowerTuneDataSetID;
+       USHORT usEDCLimit;
+       USHORT usSoftwareShutdownTemp;
+       USHORT usClockStretchAmount;
+       USHORT usReserve[2];
+} ATOM_Tonga_PowerTune_Table;
+
+typedef struct _ATOM_Fiji_PowerTune_Table {
+       UCHAR  ucRevId;
+       USHORT usTDP;
+       USHORT usConfigurableTDP;
+       USHORT usTDC;
+       USHORT usBatteryPowerLimit;
+       USHORT usSmallPowerLimit;
+       USHORT usLowCACLeakage;
+       USHORT usHighCACLeakage;
+       USHORT usMaximumPowerDeliveryLimit;
+       USHORT usTjMax;  /* For Fiji, this is also usTemperatureLimitEdge; */
+       USHORT usPowerTuneDataSetID;
+       USHORT usEDCLimit;
+       USHORT usSoftwareShutdownTemp;
+       USHORT usClockStretchAmount;
+       USHORT usTemperatureLimitHotspot;  /*The following are added for Fiji */
+       USHORT usTemperatureLimitLiquid1;
+       USHORT usTemperatureLimitLiquid2;
+       USHORT usTemperatureLimitVrVddc;
+       USHORT usTemperatureLimitVrMvdd;
+       USHORT usTemperatureLimitPlx;
+       UCHAR  ucLiquid1_I2C_address;  /*Liquid */
+       UCHAR  ucLiquid2_I2C_address;
+       UCHAR  ucLiquid_I2C_Line;
+       UCHAR  ucVr_I2C_address;        /*VR */
+       UCHAR  ucVr_I2C_Line;
+       UCHAR  ucPlx_I2C_address;  /*PLX */
+       UCHAR  ucPlx_I2C_Line;
+       USHORT usReserved;
+} ATOM_Fiji_PowerTune_Table;
+
+#define ATOM_PPM_A_A    1
+#define ATOM_PPM_A_I    2
+typedef struct _ATOM_Tonga_PPM_Table {
+       UCHAR   ucRevId;
+       UCHAR   ucPpmDesign;              /*A+I or A+A */
+       USHORT  usCpuCoreNumber;
+       ULONG  ulPlatformTDP;
+       ULONG  ulSmallACPlatformTDP;
+       ULONG  ulPlatformTDC;
+       ULONG  ulSmallACPlatformTDC;
+       ULONG  ulApuTDP;
+       ULONG  ulDGpuTDP;
+       ULONG  ulDGpuUlvPower;
+       ULONG  ulTjmax;
+} ATOM_Tonga_PPM_Table;
+
+typedef struct _ATOM_Tonga_Hard_Limit_Record {
+       ULONG  ulSCLKLimit;
+       ULONG  ulMCLKLimit;
+       USHORT  usVddcLimit;
+       USHORT  usVddciLimit;
+       USHORT  usVddgfxLimit;
+} ATOM_Tonga_Hard_Limit_Record;
+
+typedef struct _ATOM_Tonga_Hard_Limit_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;
+       ATOM_Tonga_Hard_Limit_Record entries[1];
+} ATOM_Tonga_Hard_Limit_Table;
+
+typedef struct _ATOM_Tonga_GPIO_Table {
+       UCHAR  ucRevId;
+       UCHAR  ucVRHotTriggeredSclkDpmIndex;            /* If VRHot signal is triggered SCLK will be limited to this DPM level */
+       UCHAR  ucReserve[5];
+} ATOM_Tonga_GPIO_Table;
+
+typedef struct _PPTable_Generic_SubTable_Header {
+       UCHAR  ucRevId;
+} PPTable_Generic_SubTable_Header;
+
+
+#pragma pack(pop)
+
+
+#endif
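Every subtable in this header is reached from ATOM_Tonga_POWERPLAYTABLE by a little-endian byte offset rather than an embedded pointer, so consumers locate subtables with arithmetic like the following sketch (mirroring the pattern process_pptables_v1_0.c uses below):

	const ATOM_Tonga_State_Array *state_array =
		(const ATOM_Tonga_State_Array *)((const unsigned char *)powerplay_table +
		le16_to_cpu(powerplay_table->usStateArrayOffset));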
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
new file mode 100644
index 0000000..7de701d
--- /dev/null
@@ -0,0 +1,1325 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "process_pptables_v1_0.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pp_debug.h"
+#include "hwmgr.h"
+#include "cgs_common.h"
+#include "pptable_v1_0.h"
+
+/**
+ * Private Function used during initialization.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param setIt A flag indication if the capability should be set (TRUE) or reset (FALSE).
+ * @param cap Which capability to set/reset.
+ */
+static void set_hw_cap(struct pp_hwmgr *hwmgr, bool setIt, enum phm_platform_caps cap)
+{
+       if (setIt)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
+}
+
+
+/**
+ * Private Function used during initialization.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_caps the bit array (from BIOS) of capability bits.
+ * @exception the current implementation always returns 0.
+ */
+static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
+{
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE16____),
+               "ATOM_PP_PLATFORM_CAP_ASPM_L1 is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE64____),
+               "ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE512____),
+               "ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE1024____),
+               "ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 is not supported!", continue);
+       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE2048____),
+               "ATOM_PP_PLATFORM_CAP_HTLINKCONTROL is not supported!", continue);
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY),
+                       PHM_PlatformCaps_PowerPlaySupport
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
+                       PHM_PlatformCaps_BiosPowerSourceControl
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC),
+                       PHM_PlatformCaps_AutomaticDCTransition
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL),
+                       PHM_PlatformCaps_EnableMVDDControl
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL),
+                       PHM_PlatformCaps_ControlVDDCI
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL),
+                       PHM_PlatformCaps_ControlVDDGFX
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_BACO),
+                       PHM_PlatformCaps_BACO
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND),
+                       PHM_PlatformCaps_DisableVoltageIsland
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL),
+                       PHM_PlatformCaps_CombinePCCWithThermalSignal
+                 );
+
+       set_hw_cap(
+                       hwmgr,
+                       0 != (powerplay_caps & ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE),
+                       PHM_PlatformCaps_LoadPostProductionFirmware
+                 );
+
+       return 0;
+}
+
+/**
+ * Private Function to get the PowerPlay Table Address.
+ */
+static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
+{
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+
+       u16 size;
+       u8 frev, crev;
+       void *table_address = (void *)hwmgr->soft_pp_table;
+
+       if (!table_address) {
+               table_address = (ATOM_Tonga_POWERPLAYTABLE *)
+                               cgs_atom_get_data_table(hwmgr->device,
+                                               index, &size, &frev, &crev);
+               hwmgr->soft_pp_table = table_address;   /*Cache the result in RAM.*/
+               hwmgr->soft_pp_table_size = size;
+       }
+
+       return table_address;
+}
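+
+/*
+ * Illustrative use (not part of this patch): the first call fetches the
+ * table from the VBIOS via cgs_atom_get_data_table(); later calls are
+ * served from the hwmgr->soft_pp_table cache.
+ *
+ *     const ATOM_Tonga_POWERPLAYTABLE *pp =
+ *             (const ATOM_Tonga_POWERPLAYTABLE *)get_powerplay_table(hwmgr);
+ *     if (pp == NULL)
+ *             return -EINVAL;
+ */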
+
+static int get_vddc_lookup_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_voltage_lookup_table **lookup_table,
+               const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
+               uint32_t max_levels
+               )
+{
+       uint32_t table_size, i;
+       phm_ppt_v1_voltage_lookup_table *table;
+       phm_ppt_v1_voltage_lookup_record *record;
+       ATOM_Tonga_Voltage_Lookup_Record *atom_record;
+
+       PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
+               "Invalid CAC Leakage PowerPlay Table!", return 1);
+
+       table_size = sizeof(uint32_t) +
+               sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
+
+       table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == table)
+               return -ENOMEM;
+
+       table->count = vddc_lookup_pp_tables->ucNumEntries;
+
+       for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
+               record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       phm_ppt_v1_voltage_lookup_record,
+                                       entries, table, i);
+               atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_Voltage_Lookup_Record,
+                                       entries, vddc_lookup_pp_tables, i);
+               record->us_calculated = 0;
+               record->us_vdd = atom_record->usVdd;
+               record->us_cac_low = atom_record->usCACLow;
+               record->us_cac_mid = atom_record->usCACMid;
+               record->us_cac_high = atom_record->usCACHigh;
+       }
+
+       *lookup_table = table;
+
+       return 0;
+}
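+
+/*
+ * GET_FLEXIBLE_ARRAY_MEMBER_ADDR steps through the packed, nominally
+ * one-element entries[1] arrays these ATOM tables are declared with.
+ * Conceptually it boils down to pointer arithmetic along these lines
+ * (a sketch, not the actual macro body):
+ *
+ *     #define GET_ENTRY_ADDR(type, member, ptr, n) \
+ *             ((type *)((char *)&(ptr)->member + sizeof(type) * (n)))
+ */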
+
+/**
+ * Private Function used during initialization.
+ * Initialize Platform Power Management Parameter table
+ * @param hwmgr Pointer to the hardware manager.
+ * @param atom_ppm_table Pointer to PPM table in VBIOS
+ */
+static int get_platform_power_management_table(
+               struct pp_hwmgr *hwmgr,
+               ATOM_Tonga_PPM_Table *atom_ppm_table)
+{
+       struct phm_ppm_table *ptr = kzalloc(sizeof(struct phm_ppm_table), GFP_KERNEL);
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       if (NULL == ptr)
+               return -ENOMEM;
+
+       ptr->ppm_design
+               = atom_ppm_table->ucPpmDesign;
+       ptr->cpu_core_number
+               = atom_ppm_table->usCpuCoreNumber;
+       ptr->platform_tdp
+               = atom_ppm_table->ulPlatformTDP;
+       ptr->small_ac_platform_tdp
+               = atom_ppm_table->ulSmallACPlatformTDP;
+       ptr->platform_tdc
+               = atom_ppm_table->ulPlatformTDC;
+       ptr->small_ac_platform_tdc
+               = atom_ppm_table->ulSmallACPlatformTDC;
+       ptr->apu_tdp
+               = atom_ppm_table->ulApuTDP;
+       ptr->dgpu_tdp
+               = atom_ppm_table->ulDGpuTDP;
+       ptr->dgpu_ulv_power
+               = atom_ppm_table->ulDGpuUlvPower;
+       ptr->tj_max
+               = atom_ppm_table->ulTjmax;
+
+       pp_table_information->ppm_parameter_table = ptr;
+
+       return 0;
+}
+
+/**
+ * Private Function used during initialization.
+ * Initialize TDP limits for DPM2
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ */
+static int init_dpm_2_parameters(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       int result = 0;
+       struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       ATOM_Tonga_PPM_Table *atom_ppm_table;
+       uint32_t disable_ppm = 0;
+       uint32_t disable_power_control = 0;
+
+       pp_table_information->us_ulv_voltage_offset =
+               le16_to_cpu(powerplay_table->usUlvVoltageOffset);
+
+       pp_table_information->ppm_parameter_table = NULL;
+       pp_table_information->vddc_lookup_table = NULL;
+       pp_table_information->vddgfx_lookup_table = NULL;
+       /* TDP limits */
+       hwmgr->platform_descriptor.TDPODLimit =
+               le16_to_cpu(powerplay_table->usPowerControlLimit);
+       hwmgr->platform_descriptor.TDPAdjustment = 0;
+       hwmgr->platform_descriptor.VidAdjustment = 0;
+       hwmgr->platform_descriptor.VidAdjustmentPolarity = 0;
+       hwmgr->platform_descriptor.VidMinLimit = 0;
+       hwmgr->platform_descriptor.VidMaxLimit = 1500000;
+       hwmgr->platform_descriptor.VidStep = 6250;
+
+       disable_power_control = 0;
+       if (0 == disable_power_control) {
+               /* enable TDP overdrive (PowerControl) feature as well if supported */
+               if (hwmgr->platform_descriptor.TDPODLimit != 0)
+                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerControl);
+       }
+
+       if (0 != powerplay_table->usVddcLookupTableOffset) {
+               const ATOM_Tonga_Voltage_Lookup_Table *pVddcCACTable =
+                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
+                       le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
+
+               result = get_vddc_lookup_table(hwmgr,
+                       &pp_table_information->vddc_lookup_table, pVddcCACTable, 16);
+       }
+
+       if (0 != powerplay_table->usVddgfxLookupTableOffset) {
+               const ATOM_Tonga_Voltage_Lookup_Table *pVddgfxCACTable =
+                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
+                       le16_to_cpu(powerplay_table->usVddgfxLookupTableOffset));
+
+               result = get_vddc_lookup_table(hwmgr,
+                       &pp_table_information->vddgfx_lookup_table, pVddgfxCACTable, 16);
+       }
+
+       disable_ppm = 0;
+       if (0 == disable_ppm) {
+               atom_ppm_table = (ATOM_Tonga_PPM_Table *)
+                       (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
+
+               if (0 != powerplay_table->usPPMTableOffset) {
+                       if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
+                               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_EnablePlatformPowerManagement);
+                       }
+               }
+       }
+
+       return result;
+}
+
+static int get_valid_clk(
+               struct pp_hwmgr *hwmgr,
+               struct phm_clock_array **clk_table,
+               phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table
+               )
+{
+       uint32_t table_size, i;
+       struct phm_clock_array *table;
+       phm_ppt_v1_clock_voltage_dependency_record *dep_record;
+
+       PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
+               "Invalid PowerPlay Table!", return -1);
+
+       table_size = sizeof(uint32_t) +
+               sizeof(uint32_t) * clk_volt_pp_table->count;
+
+       table = kzalloc(table_size, GFP_KERNEL);
+
+       if (NULL == table)
+               return -ENOMEM;
+
+       table->count = (uint32_t)clk_volt_pp_table->count;
+
+       for (i = 0; i < table->count; i++) {
+               dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                               phm_ppt_v1_clock_voltage_dependency_record,
+                               entries, clk_volt_pp_table, i);
+               table->values[i] = (uint32_t)dep_record->clk;
+       }
+       *clk_table = table;
+
+       return 0;
+}
+
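+/*
+ * Copy the hard clock and voltage limits out of the VBIOS hard-limit table
+ * into the generic phm_clock_and_voltage_limits structure.
+ */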
+static int get_hard_limits(
+               struct pp_hwmgr *hwmgr,
+               struct phm_clock_and_voltage_limits *limits,
+               ATOM_Tonga_Hard_Limit_Table const *limitable
+               )
+{
+       PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
+
+       /* currently we always take entries[0] parameters */
+       limits->sclk = (uint32_t)limitable->entries[0].ulSCLKLimit;
+       limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit;
+       limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit;
+       limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit;
+       limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit;
+
+       return 0;
+}
+
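+/*
+ * Convert the ATOM MCLK dependency table into the driver-side
+ * phm_ppt_v1_clock_voltage_dependency_table representation.
+ */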
+static int get_mclk_voltage_dependency_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
+               ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table
+               )
+{
+       uint32_t table_size, i;
+       phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
+       phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record;
+       ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+
+       PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
+               "Invalid PowerPlay Table!", return -1);
+
+       table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+               * mclk_dep_table->ucNumEntries;
+
+       mclk_table = kzalloc(table_size, GFP_KERNEL);
+       if (mclk_table == NULL)
+               return -ENOMEM;
+
+       mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
+
+       for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
+               mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       phm_ppt_v1_clock_voltage_dependency_record,
+                                               entries, mclk_table, i);
+               mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MCLK_Dependency_Record,
+                                               entries, mclk_dep_table, i);
+               mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
+               mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
+               mclk_table_record->vddci = mclk_dep_record->usVddci;
+               mclk_table_record->mvdd = mclk_dep_record->usMvdd;
+               mclk_table_record->clk = mclk_dep_record->ulMclk;
+       }
+
+       *pp_tonga_mclk_dep_table = mclk_table;
+
+       return 0;
+}
+
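+/*
+ * Convert the SCLK dependency table. The sub-table header revision selects
+ * the record layout: revision 0 is the Tonga format, revision 1 and later
+ * the Polaris format, which additionally carries a per-level sclk_offset.
+ */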
+static int get_sclk_voltage_dependency_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
+               PPTable_Generic_SubTable_Header const  *sclk_dep_table
+               )
+{
+       uint32_t table_size, i;
+       phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
+       phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record;
+
+       if (sclk_dep_table->ucRevId < 1) {
+               const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+                           (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
+               ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+
+               PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * tonga_table->ucNumEntries;
+
+               sclk_table = kzalloc(table_size, GFP_KERNEL);
+               if (sclk_table == NULL)
+                       return -ENOMEM;
+
+               sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+               for (i = 0; i < tonga_table->ucNumEntries; i++) {
+                       sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_SCLK_Dependency_Record,
+                                               entries, tonga_table, i);
+                       sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_clock_voltage_dependency_record,
+                                               entries, sclk_table, i);
+                       sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+                       sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+                       sclk_table_record->clk = sclk_dep_record->ulSclk;
+                       sclk_table_record->cks_enable =
+                               (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+               }
+       } else {
+               const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+                           (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+               ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record;
+
+               PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * polaris_table->ucNumEntries;
+
+               sclk_table = kzalloc(table_size, GFP_KERNEL);
+               if (sclk_table == NULL)
+                       return -ENOMEM;
+
+               sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+               for (i = 0; i < polaris_table->ucNumEntries; i++) {
+                       sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Polaris_SCLK_Dependency_Record,
+                                               entries, polaris_table, i);
+                       sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_clock_voltage_dependency_record,
+                                               entries, sclk_table, i);
+                       sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+                       sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+                       sclk_table_record->clk = sclk_dep_record->ulSclk;
+                       sclk_table_record->cks_enable =
+                               (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+                       sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
+               }
+       }
+       *pp_tonga_sclk_dep_table = sclk_table;
+
+       return 0;
+}
+
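+/*
+ * Convert the PCIe table. As with the SCLK table, the sub-table revision
+ * selects the record layout; the Polaris format adds a per-entry PCIe
+ * switching clock (pcie_sclk). Entry 0 is reserved for ULV, which is why
+ * the entry count is clamped to one more than the number of SCLK DPM levels.
+ */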
+static int get_pcie_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
+               PPTable_Generic_SubTable_Header const *ptable
+               )
+{
+       uint32_t table_size, i, pcie_count;
+       phm_ppt_v1_pcie_table *pcie_table;
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       phm_ppt_v1_pcie_record *pcie_record;
+
+       if (ptable->ucRevId < 1) {
+               const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable;
+               ATOM_Tonga_PCIE_Record *atom_pcie_record;
+
+               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) +
+                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+
+               pcie_table = kzalloc(table_size, GFP_KERNEL);
+               if (pcie_table == NULL)
+                       return -ENOMEM;
+
+               /*
+                * Make sure the number of PCIe entries is less than or equal to
+                * the number of SCLK DPM levels. Since the first PCIe entry is
+                * for ULV, #pcie has to be <= SclkLevel + 1.
+                */
+               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+               else
+                       printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceeds the number of SCLK Dpm Levels! Disregarding the excess entries...\n");
+
+               pcie_table->count = pcie_count;
+               for (i = 0; i < pcie_count; i++) {
+                       pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_pcie_record,
+                                               entries, pcie_table, i);
+                       atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_PCIE_Record,
+                                               entries, atom_pcie_table, i);
+                       pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+                       pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+               }
+
+               *pp_tonga_pcie_table = pcie_table;
+       } else {
+               /* Polaris10/Polaris11 and newer. */
+               const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable;
+               ATOM_Polaris10_PCIE_Record *atom_pcie_record;
+
+               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) +
+                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+
+               pcie_table = kzalloc(table_size, GFP_KERNEL);
+               if (pcie_table == NULL)
+                       return -ENOMEM;
+
+               /*
+                * Make sure the number of PCIe entries is less than or equal to
+                * the number of SCLK DPM levels. Since the first PCIe entry is
+                * for ULV, #pcie has to be <= SclkLevel + 1.
+                */
+               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+               else
+                       printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceeds the number of SCLK Dpm Levels! Disregarding the excess entries...\n");
+
+               pcie_table->count = pcie_count;
+
+               for (i = 0; i < pcie_count; i++) {
+                       pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               phm_ppt_v1_pcie_record,
+                                               entries, pcie_table, i);
+                       atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Polaris10_PCIE_Record,
+                                               entries, atom_pcie_table, i);
+                       pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+                       pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+                       pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
+               }
+
+               *pp_tonga_pcie_table = pcie_table;
+       }
+
+       return 0;
+}
+
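+/*
+ * Convert the PowerTune (CAC/TDP) table. Revisions below 3 use the Tonga
+ * layout; Fiji and newer revisions add the extra temperature limits and the
+ * liquid/VR/PLX I2C addressing fields.
+ */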
+static int get_cac_tdp_table(
+               struct pp_hwmgr *hwmgr,
+               struct phm_cac_tdp_table **cac_tdp_table,
+               const PPTable_Generic_SubTable_Header * table
+               )
+{
+       uint32_t table_size;
+       struct phm_cac_tdp_table *tdp_table;
+
+       table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
+       tdp_table = kzalloc(table_size, GFP_KERNEL);
+       if (tdp_table == NULL)
+               return -ENOMEM;
+
+       hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
+       if (hwmgr->dyn_state.cac_dtp_table == NULL) {
+               kfree(tdp_table);
+               return -ENOMEM;
+       }
+
+       if (table->ucRevId < 3) {
+               const ATOM_Tonga_PowerTune_Table *tonga_table =
+                       (ATOM_Tonga_PowerTune_Table *)table;
+               tdp_table->usTDP = tonga_table->usTDP;
+               tdp_table->usConfigurableTDP =
+                       tonga_table->usConfigurableTDP;
+               tdp_table->usTDC = tonga_table->usTDC;
+               tdp_table->usBatteryPowerLimit =
+                       tonga_table->usBatteryPowerLimit;
+               tdp_table->usSmallPowerLimit =
+                       tonga_table->usSmallPowerLimit;
+               tdp_table->usLowCACLeakage =
+                       tonga_table->usLowCACLeakage;
+               tdp_table->usHighCACLeakage =
+                       tonga_table->usHighCACLeakage;
+               tdp_table->usMaximumPowerDeliveryLimit =
+                       tonga_table->usMaximumPowerDeliveryLimit;
+               tdp_table->usDefaultTargetOperatingTemp =
+                       tonga_table->usTjMax;
+               tdp_table->usTargetOperatingTemp =
+                       tonga_table->usTjMax; /*Set the initial temp to the same as default */
+               tdp_table->usPowerTuneDataSetID =
+                       tonga_table->usPowerTuneDataSetID;
+               tdp_table->usSoftwareShutdownTemp =
+                       tonga_table->usSoftwareShutdownTemp;
+               tdp_table->usClockStretchAmount =
+                       tonga_table->usClockStretchAmount;
+       } else {   /* Fiji and newer */
+               const ATOM_Fiji_PowerTune_Table *fijitable =
+                       (ATOM_Fiji_PowerTune_Table *)table;
+               tdp_table->usTDP = fijitable->usTDP;
+               tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP;
+               tdp_table->usTDC = fijitable->usTDC;
+               tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit;
+               tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit;
+               tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage;
+               tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage;
+               tdp_table->usMaximumPowerDeliveryLimit =
+                       fijitable->usMaximumPowerDeliveryLimit;
+               tdp_table->usDefaultTargetOperatingTemp =
+                       fijitable->usTjMax;
+               tdp_table->usTargetOperatingTemp =
+                       fijitable->usTjMax; /*Set the initial temp to the same as default */
+               tdp_table->usPowerTuneDataSetID =
+                       fijitable->usPowerTuneDataSetID;
+               tdp_table->usSoftwareShutdownTemp =
+                       fijitable->usSoftwareShutdownTemp;
+               tdp_table->usClockStretchAmount =
+                       fijitable->usClockStretchAmount;
+               tdp_table->usTemperatureLimitHotspot =
+                       fijitable->usTemperatureLimitHotspot;
+               tdp_table->usTemperatureLimitLiquid1 =
+                       fijitable->usTemperatureLimitLiquid1;
+               tdp_table->usTemperatureLimitLiquid2 =
+                       fijitable->usTemperatureLimitLiquid2;
+               tdp_table->usTemperatureLimitVrVddc =
+                       fijitable->usTemperatureLimitVrVddc;
+               tdp_table->usTemperatureLimitVrMvdd =
+                       fijitable->usTemperatureLimitVrMvdd;
+               tdp_table->usTemperatureLimitPlx =
+                       fijitable->usTemperatureLimitPlx;
+               tdp_table->ucLiquid1_I2C_address =
+                       fijitable->ucLiquid1_I2C_address;
+               tdp_table->ucLiquid2_I2C_address =
+                       fijitable->ucLiquid2_I2C_address;
+               tdp_table->ucLiquid_I2C_Line =
+                       fijitable->ucLiquid_I2C_Line;
+               tdp_table->ucVr_I2C_address = fijitable->ucVr_I2C_address;
+               tdp_table->ucVr_I2C_Line = fijitable->ucVr_I2C_Line;
+               tdp_table->ucPlx_I2C_address = fijitable->ucPlx_I2C_address;
+               tdp_table->ucPlx_I2C_Line = fijitable->ucPlx_I2C_Line;
+       }
+
+       *cac_tdp_table = tdp_table;
+
+       return 0;
+}
+
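+/*
+ * Convert the multimedia clock-voltage dependency table (aclk, samclock,
+ * eclk, vclk and dclk per voltage level) into the driver-side representation.
+ */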
+static int get_mm_clock_voltage_table(
+               struct pp_hwmgr *hwmgr,
+               phm_ppt_v1_mm_clock_voltage_dependency_table **tonga_mm_table,
+               const ATOM_Tonga_MM_Dependency_Table * mm_dependency_table
+               )
+{
+       uint32_t table_size, i;
+       const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
+       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
+       phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record;
+
+       PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
+               "Invalid PowerPlay Table!", return -1);
+       table_size = sizeof(uint32_t) +
+               sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
+               * mm_dependency_table->ucNumEntries;
+       mm_table = kzalloc(table_size, GFP_KERNEL);
+       if (mm_table == NULL)
+               return -ENOMEM;
+
+       mm_table->count = mm_dependency_table->ucNumEntries;
+
+       for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
+               mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_MM_Dependency_Record,
+                                               entries, mm_dependency_table, i);
+               mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       phm_ppt_v1_mm_clock_voltage_dependency_record,
+                                       entries, mm_table, i);
+               mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
+               mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
+               mm_table_record->aclk = mm_dependency_record->ulAClk;
+               mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
+               mm_table_record->eclk = mm_dependency_record->ulEClk;
+               mm_table_record->vclk = mm_dependency_record->ulVClk;
+               mm_table_record->dclk = mm_dependency_record->ulDClk;
+       }
+
+       *tonga_mm_table = mm_table;
+
+       return 0;
+}
+
+/**
+ * Private function used during initialization.
+ * Initialize the clock/voltage dependency tables from the PowerPlay table.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ */
+static int init_clock_voltage_dependency(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       int result = 0;
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       const ATOM_Tonga_MM_Dependency_Table *mm_dependency_table =
+               (const ATOM_Tonga_MM_Dependency_Table *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usMMDependencyTableOffset));
+       const PPTable_Generic_SubTable_Header *pPowerTuneTable =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usPowerTuneTableOffset));
+       const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+               (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+       const PPTable_Generic_SubTable_Header *sclk_dep_table =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+       const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
+               (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usHardLimitTableOffset));
+       const PPTable_Generic_SubTable_Header *pcie_table =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
+               le16_to_cpu(powerplay_table->usPCIETableOffset));
+
+       pp_table_information->vdd_dep_on_sclk = NULL;
+       pp_table_information->vdd_dep_on_mclk = NULL;
+       pp_table_information->mm_dep_table = NULL;
+       pp_table_information->pcie_table = NULL;
+
+       if (powerplay_table->usMMDependencyTableOffset != 0)
+               result = get_mm_clock_voltage_table(hwmgr,
+               &pp_table_information->mm_dep_table, mm_dependency_table);
+
+       if (result == 0 && powerplay_table->usPowerTuneTableOffset != 0)
+               result = get_cac_tdp_table(hwmgr,
+               &pp_table_information->cac_dtp_table, pPowerTuneTable);
+
+       if (result == 0 && powerplay_table->usSclkDependencyTableOffset != 0)
+               result = get_sclk_voltage_dependency_table(hwmgr,
+               &pp_table_information->vdd_dep_on_sclk, sclk_dep_table);
+
+       if (result == 0 && powerplay_table->usMclkDependencyTableOffset != 0)
+               result = get_mclk_voltage_dependency_table(hwmgr,
+               &pp_table_information->vdd_dep_on_mclk, mclk_dep_table);
+
+       if (result == 0 && powerplay_table->usPCIETableOffset != 0)
+               result = get_pcie_table(hwmgr,
+               &pp_table_information->pcie_table, pcie_table);
+
+       if (result == 0 && powerplay_table->usHardLimitTableOffset != 0)
+               result = get_hard_limits(hwmgr,
+               &pp_table_information->max_clock_voltage_on_dc, pHardLimits);
+
+       hwmgr->dyn_state.max_clock_voltage_on_dc.sclk =
+               pp_table_information->max_clock_voltage_on_dc.sclk;
+       hwmgr->dyn_state.max_clock_voltage_on_dc.mclk =
+               pp_table_information->max_clock_voltage_on_dc.mclk;
+       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+               pp_table_information->max_clock_voltage_on_dc.vddc;
+       hwmgr->dyn_state.max_clock_voltage_on_dc.vddci =
+               pp_table_information->max_clock_voltage_on_dc.vddci;
+
+       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_mclk)
+               && (0 != pp_table_information->vdd_dep_on_mclk->count))
+               result = get_valid_clk(hwmgr, &pp_table_information->valid_mclk_values,
+               pp_table_information->vdd_dep_on_mclk);
+
+       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_sclk)
+               && (0 != pp_table_information->vdd_dep_on_sclk->count))
+               result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
+               pp_table_information->vdd_dep_on_sclk);
+
+       return result;
+}
+
+/**
+ * Retrieve the Overdrive engine and memory clock limits from the PowerPlay
+ * table and place them into the platform descriptor.
+ *
+ * @param hwmgr owner of the platform descriptor to be updated.
+ * @param powerplay_table the address of the PowerPlay table.
+ *
+ * @return Always 0.
+ */
+static int init_over_drive_limits(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
+{
+       hwmgr->platform_descriptor.overdriveLimit.engineClock =
+               le32_to_cpu(powerplay_table->ulMaxODEngineClock);
+       hwmgr->platform_descriptor.overdriveLimit.memoryClock =
+               le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
+
+       hwmgr->platform_descriptor.minOverdriveVDDC = 0;
+       hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
+       hwmgr->platform_descriptor.overdriveVDDCStep = 0;
+
+       if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
+               && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) {
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ACOverdriveSupport);
+       }
+
+       return 0;
+}
+
+/**
+ * Private function used during initialization.
+ * Initialize the thermal controller and fan table settings from the
+ * PowerPlay table.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ * @return 0 on success; -1 if a required sub-table is missing or unsupported.
+ */
+static int init_thermal_controller(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       const PPTable_Generic_SubTable_Header *fan_table;
+       ATOM_Tonga_Thermal_Controller *thermal_controller;
+
+       thermal_controller = (ATOM_Tonga_Thermal_Controller *)
+               (((unsigned long)powerplay_table) +
+               le16_to_cpu(powerplay_table->usThermalControllerOffset));
+       PP_ASSERT_WITH_CODE((0 != powerplay_table->usThermalControllerOffset),
+               "Thermal controller table not set!", return -1);
+
+       hwmgr->thermal_controller.ucType = thermal_controller->ucType;
+       hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
+       hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress;
+
+       hwmgr->thermal_controller.fanInfo.bNoFan =
+               (0 != (thermal_controller->ucFanParameters & ATOM_TONGA_PP_FANPARAMETERS_NOFAN));
+
+       hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+               thermal_controller->ucFanParameters &
+               ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+
+       hwmgr->thermal_controller.fanInfo.ulMinRPM
+               = thermal_controller->ucFanMinRPM * 100UL;
+       hwmgr->thermal_controller.fanInfo.ulMaxRPM
+               = thermal_controller->ucFanMaxRPM * 100UL;
+
+       set_hw_cap(
+                       hwmgr,
+                       ATOM_TONGA_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+                       PHM_PlatformCaps_ThermalController
+                 );
+
+       if (0 == powerplay_table->usFanTableOffset)
+               return 0;
+
+       fan_table = (const PPTable_Generic_SubTable_Header *)
+               (((unsigned long)powerplay_table) +
+               le16_to_cpu(powerplay_table->usFanTableOffset));
+
+       PP_ASSERT_WITH_CODE((0 < fan_table->ucRevId),
+               "Unsupported fan table format!", return -1);
+
+       hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
+               = 100000;
+       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_MicrocodeFanControl);
+
+       if (fan_table->ucRevId < 8) {
+               const ATOM_Tonga_Fan_Table *tonga_fan_table =
+                       (ATOM_Tonga_Fan_Table *)fan_table;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
+                       = tonga_fan_table->ucTHyst;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
+                       = tonga_fan_table->usTMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
+                       = tonga_fan_table->usTMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
+                       = tonga_fan_table->usTHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
+                       = tonga_fan_table->usPWMMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
+                       = tonga_fan_table->usPWMMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
+                       = tonga_fan_table->usPWMHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
+                       = tonga_fan_table->usTMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
+                       = tonga_fan_table->ucFanControlMode;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
+                       = tonga_fan_table->usFanPWMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
+                       = 4836;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
+                       = tonga_fan_table->usFanOutputSensitivity;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
+                       = tonga_fan_table->usFanRPMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
+                       = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units for 2 decimal places; the SMC wants MHz. */
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
+                       = tonga_fan_table->ucTargetTemperature;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
+                       = tonga_fan_table->ucMinimumPWMLimit;
+       } else {
+               const ATOM_Fiji_Fan_Table *fiji_fan_table =
+                       (ATOM_Fiji_Fan_Table *)fan_table;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
+                       = fiji_fan_table->ucTHyst;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
+                       = fiji_fan_table->usTMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
+                       = fiji_fan_table->usTMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
+                       = fiji_fan_table->usTHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
+                       = fiji_fan_table->usPWMMin;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
+                       = fiji_fan_table->usPWMMed;
+               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
+                       = fiji_fan_table->usPWMHigh;
+               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
+                       = fiji_fan_table->usTMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
+                       = fiji_fan_table->ucFanControlMode;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
+                       = fiji_fan_table->usFanPWMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
+                       = 4836;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
+                       = fiji_fan_table->usFanOutputSensitivity;
+               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
+                       = fiji_fan_table->usFanRPMMax;
+               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
+                       = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units for 2 decimal places; the SMC wants MHz. */
+               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
+                       = fiji_fan_table->ucTargetTemperature;
+               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
+                       = fiji_fan_table->ucMinimumPWMLimit;
+
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge
+                       = fiji_fan_table->usFanGainEdge;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot
+                       = fiji_fan_table->usFanGainHotspot;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid
+                       = fiji_fan_table->usFanGainLiquid;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc
+                       = fiji_fan_table->usFanGainVrVddc;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd
+                       = fiji_fan_table->usFanGainVrMvdd;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx
+                       = fiji_fan_table->usFanGainPlx;
+               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm
+                       = fiji_fan_table->usFanGainHbm;
+       }
+
+       return 0;
+}
+
+/**
+ * Private function used during initialization.
+ * Inspect the PowerPlay table for obvious signs of corruption.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param powerplay_table Pointer to the PowerPlay Table.
+ * @return 0 on success; -1 if the PowerPlay table is corrupt.
+ */
+static int check_powerplay_tables(
+               struct pp_hwmgr *hwmgr,
+               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
+               )
+{
+       const ATOM_Tonga_State_Array *state_arrays;
+
+       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)powerplay_table) +
+               le16_to_cpu(powerplay_table->usStateArrayOffset));
+
+       PP_ASSERT_WITH_CODE((ATOM_Tonga_TABLE_REVISION_TONGA <=
+               powerplay_table->sHeader.ucTableFormatRevision),
+               "Unsupported PPTable format!", return -1);
+       PP_ASSERT_WITH_CODE((0 != powerplay_table->usStateArrayOffset),
+               "State table is not set!", return -1);
+       PP_ASSERT_WITH_CODE((0 < powerplay_table->sHeader.usStructureSize),
+               "Invalid PowerPlay Table!", return -1);
+       PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
+               "Invalid PowerPlay Table!", return -1);
+
+       return 0;
+}
+
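+/*
+ * Top-level entry point: allocate the v1 table-information block, locate the
+ * PowerPlay table in the VBIOS, sanity-check it and then run the individual
+ * init_*() parsers above. Any failure is propagated to the caller.
+ */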
+int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
+{
+       int result = 0;
+       const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
+
+       hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v1_information), GFP_KERNEL);
+
+       PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
+                           "Failed to allocate hwmgr->pptable!", return -ENOMEM);
+
+       powerplay_table = get_powerplay_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((NULL != powerplay_table),
+               "Missing PowerPlay Table!", return -1);
+
+       result = check_powerplay_tables(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "check_powerplay_tables failed", return result);
+
+       result = set_platform_caps(hwmgr,
+                                  le32_to_cpu(powerplay_table->ulPlatformCaps));
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "set_platform_caps failed", return result);
+
+       result = init_thermal_controller(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_thermal_controller failed", return result);
+
+       result = init_over_drive_limits(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_over_drive_limits failed", return result);
+
+       result = init_clock_voltage_dependency(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_clock_voltage_dependency failed", return result);
+
+       result = init_dpm_2_parameters(hwmgr, powerplay_table);
+
+       PP_ASSERT_WITH_CODE((result == 0),
+                           "init_dpm_2_parameters failed", return result);
+
+       return result;
+}
+
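+/*
+ * Free everything allocated by pp_tables_v1_0_initialize(). Each pointer is
+ * NULLed after kfree() so a repeated uninitialize is harmless.
+ */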
+int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
+{
+       struct phm_ppt_v1_information *pp_table_information =
+               (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+       kfree(pp_table_information->vdd_dep_on_sclk);
+       pp_table_information->vdd_dep_on_sclk = NULL;
+
+       kfree(pp_table_information->vdd_dep_on_mclk);
+       pp_table_information->vdd_dep_on_mclk = NULL;
+
+       kfree(pp_table_information->valid_mclk_values);
+       pp_table_information->valid_mclk_values = NULL;
+
+       kfree(pp_table_information->valid_sclk_values);
+       pp_table_information->valid_sclk_values = NULL;
+
+       kfree(pp_table_information->vddc_lookup_table);
+       pp_table_information->vddc_lookup_table = NULL;
+
+       kfree(pp_table_information->vddgfx_lookup_table);
+       pp_table_information->vddgfx_lookup_table = NULL;
+
+       kfree(pp_table_information->mm_dep_table);
+       pp_table_information->mm_dep_table = NULL;
+
+       kfree(pp_table_information->cac_dtp_table);
+       pp_table_information->cac_dtp_table = NULL;
+
+       kfree(hwmgr->dyn_state.cac_dtp_table);
+       hwmgr->dyn_state.cac_dtp_table = NULL;
+
+       kfree(pp_table_information->ppm_parameter_table);
+       pp_table_information->ppm_parameter_table = NULL;
+
+       kfree(pp_table_information->pcie_table);
+       pp_table_information->pcie_table = NULL;
+
+       kfree(hwmgr->pptable);
+       hwmgr->pptable = NULL;
+
+       return 0;
+}
+
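+/*
+ * Entry points consumed by the hwmgr core; tonga_hwmgr_init() now selects
+ * this table in place of the old Tonga-specific pptable_funcs.
+ */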
+const struct pp_table_func pptable_v1_0_funcs = {
+       .pptable_init = pp_tables_v1_0_initialize,
+       .pptable_fini = pp_tables_v1_0_uninitialize,
+};
+
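+/*
+ * The number of power states equals the number of entries in the VBIOS
+ * state array.
+ */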
+int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+       ATOM_Tonga_State_Array const *state_arrays;
+       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE((NULL != pp_table),
+                       "Missing PowerPlay Table!", return -1);
+       PP_ASSERT_WITH_CODE((pp_table->sHeader.ucTableFormatRevision >=
+                       ATOM_Tonga_TABLE_REVISION_TONGA),
+                       "Incorrect PowerPlay table revision!", return -1);
+
+       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
+                       le16_to_cpu(pp_table->usStateArrayOffset));
+
+       return (uint32_t)(state_arrays->ucNumEntries);
+}
+
+/**
+ * Private function to convert flags stored in the BIOS to software flags in
+ * PowerPlay.
+ */
+static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
+               uint16_t classification, uint16_t classification2)
+{
+       uint32_t result = 0;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
+               result |= PP_StateClassificationFlag_Boot;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+               result |= PP_StateClassificationFlag_Thermal;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+               result |= PP_StateClassificationFlag_LimitedPowerSource;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
+               result |= PP_StateClassificationFlag_Rest;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
+               result |= PP_StateClassificationFlag_Forced;
+
+       if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
+               result |= PP_StateClassificationFlag_ACPI;
+
+       if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+               result |= PP_StateClassificationFlag_LimitedPowerSource_2;
+
+       return result;
+}
+
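+/* Number of VCE states, or 0 when the table has no VCE state sub-table. */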
+static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+       const ATOM_Tonga_VCE_State_Table *vce_state_table;
+
+       if (pp_table == NULL ||
+                       le16_to_cpu(pp_table->usVCEStateTableOffset) == 0)
+               return 0;
+
+       vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) +
+                               le16_to_cpu(pp_table->usVCEStateTableOffset));
+
+       return vce_state_table->ucNumEntries;
+}
+
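+/*
+ * Resolve one VCE state: the state record stores indices into the SCLK,
+ * MCLK and MM dependency tables, which are dereferenced here to produce the
+ * actual evclk/ecclk/sclk/mclk values. An out-of-range MCLK index falls
+ * back to the highest MCLK level.
+ */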
+static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
+               struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+{
+       const ATOM_Tonga_VCE_State_Record *vce_state_record;
+       ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+       ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+       ATOM_Tonga_MM_Dependency_Record *mm_dep_record;
+       const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr);
+       const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usVCEStateTableOffset));
+       const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usSclkDependencyTableOffset));
+       const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usMclkDependencyTableOffset));
+       const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable)
+                                                         + le16_to_cpu(pptable->usMMDependencyTableOffset));
+
+       PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries),
+                        "Requested state entry ID is out of range!",
+                        return -EINVAL);
+
+       vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_VCE_State_Record,
+                                       entries, vce_state_table, i);
+       sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_SCLK_Dependency_Record,
+                                       entries, sclk_dep_table,
+                                       vce_state_record->ucSCLKIndex);
+       mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MM_Dependency_Record,
+                                       entries, mm_dep_table,
+                                       vce_state_record->ucVCEClockIndex);
+       *flag = vce_state_record->ucFlag;
+
+       vce_state->evclk = mm_dep_record->ulEClk;
+       vce_state->ecclk = mm_dep_record->ulEClk;
+       vce_state->sclk = sclk_dep_record->ulSclk;
+
+       if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
+               mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MCLK_Dependency_Record,
+                                       entries, mclk_dep_table,
+                                       mclk_dep_table->ucNumEntries - 1);
+       else
+               mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                       ATOM_Tonga_MCLK_Dependency_Record,
+                                       entries, mclk_dep_table,
+                                       vce_state_record->ucMCLKIndex);
+
+       vce_state->mclk = mclk_dep_record->ulMclk;
+       return 0;
+}
+
+/**
+ * Create a Power State out of an entry in the PowerPlay table.
+ * This function is called by the hardware back-end.
+ * @param hwmgr Pointer to the hardware manager.
+ * @param entry_index The index of the entry to be extracted from the table.
+ * @param power_state The address of the PowerState instance being created.
+ * @return -1 if the entry cannot be retrieved.
+ */
+int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
+               uint32_t entry_index, struct pp_power_state *power_state,
+               int (*call_back_func)(struct pp_hwmgr *, void *,
+                               struct pp_power_state *, void *, uint32_t))
+{
+       int result = 0;
+       const ATOM_Tonga_State_Array *state_arrays;
+       const ATOM_Tonga_State *state_entry;
+       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+       int i, j;
+       uint32_t flags = 0;
+
+       PP_ASSERT_WITH_CODE((pp_table != NULL), "Missing PowerPlay Table!", return -1);
+       power_state->classification.bios_index = entry_index;
+
+       if (pp_table->sHeader.ucTableFormatRevision >=
+                       ATOM_Tonga_TABLE_REVISION_TONGA) {
+               state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
+                               le16_to_cpu(pp_table->usStateArrayOffset));
+
+               PP_ASSERT_WITH_CODE((0 < pp_table->usStateArrayOffset),
+                               "Invalid PowerPlay Table State Array Offset.", return -1);
+               PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
+                               "Invalid PowerPlay Table State Array.", return -1);
+               PP_ASSERT_WITH_CODE((entry_index < state_arrays->ucNumEntries),
+                               "Invalid PowerPlay Table State Array Entry.", return -1);
+
+               state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+                                               ATOM_Tonga_State, entries,
+                                               state_arrays, entry_index);
+
+               result = call_back_func(hwmgr, (void *)state_entry, power_state,
+                               (void *)pp_table,
+                               make_classification_flags(hwmgr,
+                                       le16_to_cpu(state_entry->usClassification),
+                                       le16_to_cpu(state_entry->usClassification2)));
+       }
+
+       if (!result && (power_state->classification.flags &
+                       PP_StateClassificationFlag_Boot))
+               result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
+
+       hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
+
+       if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+               for (j = 0; j < i; j++)
+                       ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
+       }
+
+       return result;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
new file mode 100644 (file)
index 0000000..b9710ab
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _PROCESSPPTABLES_V1_0_H
+#define _PROCESSPPTABLES_V1_0_H
+
+#include "hwmgr.h"
+
+extern const struct pp_table_func pptable_v1_0_funcs;
+extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr);
+extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index,
+               struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
+                               struct pp_power_state *, void *, uint32_t));
+
+#endif
+
index 6c321b0..ccf7ebe 100644 (file)
@@ -1523,7 +1523,7 @@ int get_number_of_vce_state_table_entries(
 
 int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
                                                        unsigned long i,
-                                                       struct PP_VCEState *vce_state,
+                                                       struct pp_vce_state *vce_state,
                                                        void **clock_info,
                                                        unsigned long *flag)
 {
index 7f9ba7f..582d04a 100644 (file)
@@ -29,8 +29,8 @@
 #include "tonga_hwmgr.h"
 #include "pptable.h"
 #include "processpptables.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
+#include "process_pptables_v1_0.h"
+#include "pptable_v1_0.h"
 #include "pp_debug.h"
 #include "tonga_ppsmc.h"
 #include "cgs_common.h"
@@ -202,6 +202,7 @@ uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
        return i - 1;
 }
 
+
 /**
  * @brief PhwTonga_GetVoltageOrder
  *  Returns index of requested voltage record in lookup(table)
@@ -229,7 +230,7 @@ uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
        return i-1;
 }
 
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
 {
        /*
         * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
@@ -334,7 +335,7 @@ void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 
 }
 
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
 {
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
 
@@ -771,7 +772,7 @@ int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
  * @param    hwmgr  the address of the powerplay hardware manager.
  * @return   always 0
  */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
 {
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
        struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
@@ -1314,15 +1315,6 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
-/**
- * Convert a voltage value in mv unit to VID number required by SMU firmware
- */
-static uint8_t convert_to_vid(uint16_t vddc)
-{
-       return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
-
-
 /**
  * Preparation of vddc and vddgfx CAC tables for SMC.
  *
@@ -2894,7 +2886,7 @@ int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
  * @param    pInput  the pointer to input data (PowerState)
  * @return   always 0
  */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
 {
        int result;
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
@@ -3989,7 +3981,7 @@ int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
        return 0;
 }
 
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
 {
        int result;
        tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
@@ -4326,6 +4318,79 @@ int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
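+/*
+ * Map the set of active auto-throttle sources onto the SMC's DPM event
+ * source register and (dis)engage thermal protection accordingly.
+ */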
+static void tonga_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+       bool protection;
+       enum DPM_EVENT_SRC src;
+
+       switch (sources) {
+       default:
+               printk(KERN_ERR "Unknown throttling event sources.\n");
+               /* fall through */
+       case 0:
+               protection = false;
+               /* src is unused */
+               break;
+       case (1 << PHM_AutoThrottleSource_Thermal):
+               protection = true;
+               src = DPM_EVENT_SRC_DIGITAL;
+               break;
+       case (1 << PHM_AutoThrottleSource_External):
+               protection = true;
+               src = DPM_EVENT_SRC_EXTERNAL;
+               break;
+       case (1 << PHM_AutoThrottleSource_External) |
+                       (1 << PHM_AutoThrottleSource_Thermal):
+               protection = true;
+               src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+               break;
+       }
+       /* Order matters - don't enable thermal protection for the wrong source. */
+       if (protection) {
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+                               DPM_EVENT_SRC, src);
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                               THERMAL_PROTECTION_DIS,
+                               !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                                               PHM_PlatformCaps_ThermalController));
+       } else {
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                               THERMAL_PROTECTION_DIS, 1);
+       }
+}
+
+static int tonga_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+       if (!(data->active_auto_throttle_sources & (1 << source))) {
+               data->active_auto_throttle_sources |= 1 << source;
+               tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int tonga_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return tonga_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+static int tonga_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+       if (data->active_auto_throttle_sources & (1 << source)) {
+               data->active_auto_throttle_sources &= ~(1 << source);
+               tonga_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int tonga_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return tonga_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
 
 int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
@@ -4409,6 +4474,10 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((tmp_result == 0),
                        "Failed to power control set level!", result = tmp_result);
 
+       tmp_result = tonga_enable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to enable thermal auto throttle!", result = tmp_result);
+
        return result;
 }
 
@@ -4420,6 +4489,10 @@ int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                "SMC is still running!", return 0);
 
+       tmp_result = tonga_disable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable thermal auto throttle!", result = tmp_result);
+
        tmp_result = tonga_stop_dpm(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                "Failed to stop DPM!", result = tmp_result);
@@ -5090,7 +5163,7 @@ static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 
        tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));
 
-       result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
+       result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, ps,
                        tonga_get_pp_table_entry_callback_func);
 
        /* This is the earliest time we have all the dependency table and the VBIOS boot state
@@ -6254,7 +6327,7 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
        .get_sclk = tonga_dpm_get_sclk,
        .patch_boot_state = tonga_dpm_patch_boot_state,
        .get_pp_table_entry = tonga_get_pp_table_entry,
-       .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
+       .get_num_of_pp_table_entries = get_number_of_powerplay_table_entries_v1_0,
        .print_current_perforce_level = tonga_print_current_perforce_level,
        .powerdown_uvd = tonga_phm_powerdown_uvd,
        .powergate_uvd = tonga_phm_powergate_uvd,
@@ -6290,7 +6363,7 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
        hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
-       hwmgr->pptable_func = &tonga_pptable_funcs;
+       hwmgr->pptable_func = &pptable_v1_0_funcs;
        pp_tonga_thermal_initialize(hwmgr);
        return 0;
 }
index 9496ade..24d9a05 100644 (file)
@@ -56,9 +56,6 @@ void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
        else
                tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0];
 
-       /* Assume disabled */
-       phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerContainment);
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_CAC);
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
deleted file mode 100644 (file)
index f127198..0000000
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_PPTABLE_H
-#define TONGA_PPTABLE_H
-
-/** \file
- * This is a PowerPlay table header file
- */
-#pragma pack(push, 1)
-
-#include "hwmgr.h"
-
-#define ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
-#define ATOM_TONGA_PP_FANPARAMETERS_NOFAN                                 0x80    /* No fan is connected to this controller. */
-
-#define ATOM_TONGA_PP_THERMALCONTROLLER_NONE      0
-#define ATOM_TONGA_PP_THERMALCONTROLLER_LM96163   17
-#define ATOM_TONGA_PP_THERMALCONTROLLER_TONGA     21
-#define ATOM_TONGA_PP_THERMALCONTROLLER_FIJI      22
-
-/*
- * Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
- * We probably should reserve the bit 0x80 for this use.
- * To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
- * The driver can pick the correct internal controller based on the ASIC.
- */
-
-#define ATOM_TONGA_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    /* ADT7473 Fan Control + Internal Thermal Controller */
-#define ATOM_TONGA_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    /* EMC2103 Fan Control + Internal Thermal Controller */
-
-/* ATOM_TONGA_POWERPLAYTABLE::ulPlatformCaps */
-#define ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL              0x1            /* This cap indicates whether vddgfx will be a separated power rail. */
-#define ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY                   0x2            /* This cap indicates whether this is a mobile part and CCC needs to show the Powerplay page. */
-#define ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE            0x4            /* This cap indicates whether power source notification is done by SBIOS directly. */
-#define ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND      0x8            /* Enable the option to overwrite voltage island feature to be disabled, regardless of VddGfx power rail support. */
-#define ____RETIRE16____                                0x10
-#define ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC                 0x20            /* This cap indicates whether power source notification is done by GPIO directly. */
-#define ____RETIRE64____                                0x40
-#define ____RETIRE128____                               0x80
-#define ____RETIRE256____                              0x100
-#define ____RETIRE512____                              0x200
-#define ____RETIRE1024____                             0x400
-#define ____RETIRE2048____                             0x800
-#define ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL             0x1000            /* This cap indicates dynamic MVDD is required. Uncheck to disable it. */
-#define ____RETIRE2000____                            0x2000
-#define ____RETIRE4000____                            0x4000
-#define ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL            0x8000            /* This cap indicates dynamic VDDCI is required. Uncheck to disable it. */
-#define ____RETIRE10000____                          0x10000
-#define ATOM_TONGA_PP_PLATFORM_CAP_BACO                    0x20000            /* Enable to indicate the driver supports BACO state. */
-
-#define ATOM_TONGA_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17         0x100000     /* Enable to indicate the driver supports thermal2GPIO17. */
-#define ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL  0x1000000     /* Enable to indicate if thermal and PCC are sharing the same GPIO */
-#define ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE       0x2000000
-
-/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */
-#define ATOM_PPLIB_CLASSIFICATION_UI_MASK               0x0007
-#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT              0
-#define ATOM_PPLIB_CLASSIFICATION_UI_NONE               0
-#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY            1
-#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED           3
-#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE        5
-/* 2, 4, 6, 7 are reserved */
-
-#define ATOM_PPLIB_CLASSIFICATION_BOOT                  0x0008
-#define ATOM_PPLIB_CLASSIFICATION_THERMAL               0x0010
-#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE    0x0020
-#define ATOM_PPLIB_CLASSIFICATION_REST                  0x0040
-#define ATOM_PPLIB_CLASSIFICATION_FORCED                0x0080
-#define ATOM_PPLIB_CLASSIFICATION_ACPI                  0x1000
-
-/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */
-#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
-
-#define ATOM_Tonga_DISALLOW_ON_DC                       0x00004000
-#define ATOM_Tonga_ENABLE_VARIBRIGHT                    0x00008000
-
-#define ATOM_Tonga_TABLE_REVISION_TONGA                 7
-
-typedef struct _ATOM_Tonga_POWERPLAYTABLE {
-       ATOM_COMMON_TABLE_HEADER sHeader;
-
-       UCHAR  ucTableRevision;
-       USHORT usTableSize;                                             /*the size of header structure */
-
-       ULONG   ulGoldenPPID;
-       ULONG   ulGoldenRevision;
-       USHORT  usFormatID;
-
-       USHORT  usVoltageTime;                                   /*in microseconds */
-       ULONG   ulPlatformCaps;                                   /*See ATOM_Tonga_CAPS_* */
-
-       ULONG   ulMaxODEngineClock;                        /*For Overdrive.  */
-       ULONG   ulMaxODMemoryClock;                        /*For Overdrive. */
-
-       USHORT  usPowerControlLimit;
-       USHORT  usUlvVoltageOffset;                               /*in mv units */
-
-       USHORT  usStateArrayOffset;                               /*points to ATOM_Tonga_State_Array */
-       USHORT  usFanTableOffset;                                 /*points to ATOM_Tonga_Fan_Table */
-       USHORT  usThermalControllerOffset;                 /*points to ATOM_Tonga_Thermal_Controller */
-       USHORT  usReserv;                                                  /*CustomThermalPolicy removed for Tonga. Keep this field reserved. */
-
-       USHORT  usMclkDependencyTableOffset;       /*points to ATOM_Tonga_MCLK_Dependency_Table */
-       USHORT  usSclkDependencyTableOffset;       /*points to ATOM_Tonga_SCLK_Dependency_Table */
-       USHORT  usVddcLookupTableOffset;                   /*points to ATOM_Tonga_Voltage_Lookup_Table */
-       USHORT  usVddgfxLookupTableOffset;              /*points to ATOM_Tonga_Voltage_Lookup_Table */
-
-       USHORT  usMMDependencyTableOffset;                /*points to ATOM_Tonga_MM_Dependency_Table */
-
-       USHORT  usVCEStateTableOffset;                     /*points to ATOM_Tonga_VCE_State_Table; */
-
-       USHORT  usPPMTableOffset;                                 /*points to ATOM_Tonga_PPM_Table */
-       USHORT  usPowerTuneTableOffset;                   /*points to ATOM_PowerTune_Table */
-
-       USHORT  usHardLimitTableOffset;                    /*points to ATOM_Tonga_Hard_Limit_Table */
-
-       USHORT  usPCIETableOffset;                                /*points to ATOM_Tonga_PCIE_Table */
-
-       USHORT  usGPIOTableOffset;                                /*points to ATOM_Tonga_GPIO_Table */
-
-       USHORT  usReserved[6];                                     /*TODO: modify reserved size to fit structure alignment */
-} ATOM_Tonga_POWERPLAYTABLE;
-
-typedef struct _ATOM_Tonga_State {
-       UCHAR  ucEngineClockIndexHigh;
-       UCHAR  ucEngineClockIndexLow;
-
-       UCHAR  ucMemoryClockIndexHigh;
-       UCHAR  ucMemoryClockIndexLow;
-
-       UCHAR  ucPCIEGenLow;
-       UCHAR  ucPCIEGenHigh;
-
-       UCHAR  ucPCIELaneLow;
-       UCHAR  ucPCIELaneHigh;
-
-       USHORT usClassification;
-       ULONG ulCapsAndSettings;
-       USHORT usClassification2;
-       UCHAR  ucUnused[4];
-} ATOM_Tonga_State;
-
-typedef struct _ATOM_Tonga_State_Array {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;             /* Number of entries. */
-       ATOM_Tonga_State states[1];     /* Dynamically allocate entries. */
-} ATOM_Tonga_State_Array;
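
These VBIOS tables use the old one-element-array idiom: the struct is overlaid directly on the (pragma pack(1), hence unpadded) BIOS image, and the real entry count is only known at runtime from ucNumEntries. A sketch of how the byte size of such a table would be computed (hypothetical helper, not part of this header):

    static uint32_t state_array_size(const ATOM_Tonga_State_Array *arr)
    {
        /* fixed header minus the single placeholder entry,
         * plus ucNumEntries real records */
        return sizeof(*arr) - sizeof(arr->states)
            + (uint32_t)arr->ucNumEntries * sizeof(arr->states[0]);
    }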
-
-typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
-       UCHAR  ucVddcInd;       /* Vddc voltage */
-       USHORT usVddci;
-       USHORT usVddgfxOffset;  /* Offset relative to Vddc voltage */
-       USHORT usMvdd;
-       ULONG ulMclk;
-       USHORT usReserved;
-} ATOM_Tonga_MCLK_Dependency_Record;
-
-typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_MCLK_Dependency_Record entries[1];                           /* Dynamically allocate entries. */
-} ATOM_Tonga_MCLK_Dependency_Table;
-
-typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
-       UCHAR  ucVddInd;                                                                                        /* Base voltage */
-       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
-       ULONG ulSclk;
-       USHORT usEdcCurrent;
-       UCHAR  ucReliabilityTemperature;
-       UCHAR  ucCKSVOffsetandDisable;                                                    /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
-} ATOM_Tonga_SCLK_Dependency_Record;
-
-typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_SCLK_Dependency_Record entries[1];                            /* Dynamically allocate entries. */
-} ATOM_Tonga_SCLK_Dependency_Table;
-
-typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
-       UCHAR  ucVddInd;                                                                                        /* Base voltage */
-       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
-       ULONG ulSclk;
-       USHORT usEdcCurrent;
-       UCHAR  ucReliabilityTemperature;
-       UCHAR  ucCKSVOffsetandDisable;                  /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
-       ULONG  ulSclkOffset;
-} ATOM_Polaris_SCLK_Dependency_Record;
-
-typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                     /* Number of entries. */
-       ATOM_Polaris_SCLK_Dependency_Record entries[1];                          /* Dynamically allocate entries. */
-} ATOM_Polaris_SCLK_Dependency_Table;
-
-typedef struct _ATOM_Tonga_PCIE_Record {
-       UCHAR ucPCIEGenSpeed;
-       UCHAR usPCIELaneWidth;
-       UCHAR ucReserved[2];
-} ATOM_Tonga_PCIE_Record;
-
-typedef struct _ATOM_Tonga_PCIE_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_PCIE_Record entries[1];                                                      /* Dynamically allocate entries. */
-} ATOM_Tonga_PCIE_Table;
-
-typedef struct _ATOM_Polaris10_PCIE_Record {
-       UCHAR ucPCIEGenSpeed;
-       UCHAR usPCIELaneWidth;
-       UCHAR ucReserved[2];
-       ULONG ulPCIE_Sclk;
-} ATOM_Polaris10_PCIE_Record;
-
-typedef struct _ATOM_Polaris10_PCIE_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                         /* Number of entries. */
-       ATOM_Polaris10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
-} ATOM_Polaris10_PCIE_Table;
-
-
-typedef struct _ATOM_Tonga_MM_Dependency_Record {
-       UCHAR   ucVddcInd;                                                                                       /* VDDC voltage */
-       USHORT  usVddgfxOffset;                                                                   /* Offset relative to VDDC voltage */
-       ULONG  ulDClk;                                                                                          /* UVD D-clock */
-       ULONG  ulVClk;                                                                                          /* UVD V-clock */
-       ULONG  ulEClk;                                                                                          /* VCE clock */
-       ULONG  ulAClk;                                                                                          /* ACP clock */
-       ULONG  ulSAMUClk;                                                                                       /* SAMU clock */
-} ATOM_Tonga_MM_Dependency_Record;
-
-typedef struct _ATOM_Tonga_MM_Dependency_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_MM_Dependency_Record entries[1];                        /* Dynamically allocate entries. */
-} ATOM_Tonga_MM_Dependency_Table;
-
-typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
-       USHORT usVdd;                                                                                      /* Base voltage */
-       USHORT usCACLow;
-       USHORT usCACMid;
-       USHORT usCACHigh;
-} ATOM_Tonga_Voltage_Lookup_Record;
-
-typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;                                                                             /* Number of entries. */
-       ATOM_Tonga_Voltage_Lookup_Record entries[1];                            /* Dynamically allocate entries. */
-} ATOM_Tonga_Voltage_Lookup_Table;
-
-typedef struct _ATOM_Tonga_Fan_Table {
-       UCHAR   ucRevId;                                                 /* Change this if the table format changes or version changes so that the other fields are not the same. */
-       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
-       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM. */
-       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
-       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
-       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
-       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
-       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
-       USHORT  usTMax;                                                  /* The max temperature */
-       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
-       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
-       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
-       USHORT  usFanRPMMax;                                      /* The default value in RPM */
-       ULONG  ulMinFanSCLKAcousticLimit;          /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
-       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
-       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
-       USHORT  usReserved;
-} ATOM_Tonga_Fan_Table;
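
The fan table encodes a piecewise-linear, two-slope curve: flat at usPWMMin below usTMin, a first slope up to usTMed, and a second slope above it. A sketch of how such a curve evaluates, assuming usTHigh is an absolute temperature, a monotonically increasing curve, and the table's raw units (0.01 degC for temperatures, 0.01 % for PWM):

    static uint16_t fan_pwm(uint16_t t, const ATOM_Tonga_Fan_Table *f)
    {
        if (t <= f->usTMin)
            return f->usPWMMin;
        if (t <= f->usTMed) /* first slope: TMin..TMed -> PWMMin..PWMMed */
            return f->usPWMMin + (uint32_t)(t - f->usTMin) *
                (f->usPWMMed - f->usPWMMin) / (f->usTMed - f->usTMin);
        if (t <= f->usTHigh) /* second slope: TMed..THigh -> PWMMed..PWMHigh */
            return f->usPWMMed + (uint32_t)(t - f->usTMed) *
                (f->usPWMHigh - f->usPWMMed) / (f->usTHigh - f->usTMed);
        return f->usPWMHigh;
    }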
-
-typedef struct _ATOM_Fiji_Fan_Table {
-       UCHAR   ucRevId;                                                 /* Change this if the table format changes or version changes so that the other fields are not the same. */
-       UCHAR   ucTHyst;                                                 /* Temperature hysteresis. Integer. */
-       USHORT  usTMin;                                                  /* The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM. */
-       USHORT  usTMed;                                                  /* The middle temperature where we change slopes. */
-       USHORT  usTHigh;                                                 /* The high point above TMed for adjusting the second slope. */
-       USHORT  usPWMMin;                                                /* The minimum PWM value in percent (0.01% increments). */
-       USHORT  usPWMMed;                                                /* The PWM value (in percent) at TMed. */
-       USHORT  usPWMHigh;                                               /* The PWM value at THigh. */
-       USHORT  usTMax;                                                  /* The max temperature */
-       UCHAR   ucFanControlMode;                                 /* Legacy or Fuzzy Fan mode */
-       USHORT  usFanPWMMax;                                      /* Maximum allowed fan power in percent */
-       USHORT  usFanOutputSensitivity;           /* Sensitivity of fan reaction to temperature changes */
-       USHORT  usFanRPMMax;                                      /* The default value in RPM */
-       ULONG  ulMinFanSCLKAcousticLimit;               /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
-       UCHAR   ucTargetTemperature;                     /* Advanced fan controller target temperature. */
-       UCHAR   ucMinimumPWMLimit;                        /* The minimum PWM that the advanced fan controller can set.  This should be set to the highest PWM that will run the fan at its lowest RPM. */
-       USHORT  usFanGainEdge;
-       USHORT  usFanGainHotspot;
-       USHORT  usFanGainLiquid;
-       USHORT  usFanGainVrVddc;
-       USHORT  usFanGainVrMvdd;
-       USHORT  usFanGainPlx;
-       USHORT  usFanGainHbm;
-       USHORT  usReserved;
-} ATOM_Fiji_Fan_Table;
-
-typedef struct _ATOM_Tonga_Thermal_Controller {
-       UCHAR ucRevId;
-       UCHAR ucType;              /* one of ATOM_TONGA_PP_THERMALCONTROLLER_* */
-       UCHAR ucI2cLine;                /* as interpreted by DAL I2C */
-       UCHAR ucI2cAddress;
-       UCHAR ucFanParameters;  /* Fan Control Parameters. */
-       UCHAR ucFanMinRPM;       /* Fan Minimum RPM (hundreds) -- for display purposes only. */
-       UCHAR ucFanMaxRPM;       /* Fan Maximum RPM (hundreds) -- for display purposes only. */
-       UCHAR ucReserved;
-       UCHAR ucFlags;             /* to be defined */
-} ATOM_Tonga_Thermal_Controller;
-
-typedef struct _ATOM_Tonga_VCE_State_Record {
-       UCHAR  ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Tonga_MM_Dependency_Table' type */
-       UCHAR  ucFlag;          /* 2 bits indicates memory p-states */
-       UCHAR  ucSCLKIndex;             /*index into ATOM_Tonga_SCLK_Dependency_Table */
-       UCHAR  ucMCLKIndex;             /*index into ATOM_Tonga_MCLK_Dependency_Table */
-} ATOM_Tonga_VCE_State_Record;
-
-typedef struct _ATOM_Tonga_VCE_State_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;
-       ATOM_Tonga_VCE_State_Record entries[1];
-} ATOM_Tonga_VCE_State_Table;
-
-typedef struct _ATOM_Tonga_PowerTune_Table {
-       UCHAR  ucRevId;
-       USHORT usTDP;
-       USHORT usConfigurableTDP;
-       USHORT usTDC;
-       USHORT usBatteryPowerLimit;
-       USHORT usSmallPowerLimit;
-       USHORT usLowCACLeakage;
-       USHORT usHighCACLeakage;
-       USHORT usMaximumPowerDeliveryLimit;
-       USHORT usTjMax;
-       USHORT usPowerTuneDataSetID;
-       USHORT usEDCLimit;
-       USHORT usSoftwareShutdownTemp;
-       USHORT usClockStretchAmount;
-       USHORT usReserve[2];
-} ATOM_Tonga_PowerTune_Table;
-
-typedef struct _ATOM_Fiji_PowerTune_Table {
-       UCHAR  ucRevId;
-       USHORT usTDP;
-       USHORT usConfigurableTDP;
-       USHORT usTDC;
-       USHORT usBatteryPowerLimit;
-       USHORT usSmallPowerLimit;
-       USHORT usLowCACLeakage;
-       USHORT usHighCACLeakage;
-       USHORT usMaximumPowerDeliveryLimit;
-       USHORT usTjMax;  /* For Fiji, this is also usTemperatureLimitEdge; */
-       USHORT usPowerTuneDataSetID;
-       USHORT usEDCLimit;
-       USHORT usSoftwareShutdownTemp;
-       USHORT usClockStretchAmount;
-       USHORT usTemperatureLimitHotspot;  /*The following are added for Fiji */
-       USHORT usTemperatureLimitLiquid1;
-       USHORT usTemperatureLimitLiquid2;
-       USHORT usTemperatureLimitVrVddc;
-       USHORT usTemperatureLimitVrMvdd;
-       USHORT usTemperatureLimitPlx;
-       UCHAR  ucLiquid1_I2C_address;  /*Liquid */
-       UCHAR  ucLiquid2_I2C_address;
-       UCHAR  ucLiquid_I2C_Line;
-       UCHAR  ucVr_I2C_address;        /*VR */
-       UCHAR  ucVr_I2C_Line;
-       UCHAR  ucPlx_I2C_address;  /*PLX */
-       UCHAR  ucPlx_I2C_Line;
-       USHORT usReserved;
-} ATOM_Fiji_PowerTune_Table;
-
-#define ATOM_PPM_A_A    1
-#define ATOM_PPM_A_I    2
-typedef struct _ATOM_Tonga_PPM_Table {
-       UCHAR   ucRevId;
-       UCHAR   ucPpmDesign;              /*A+I or A+A */
-       USHORT  usCpuCoreNumber;
-       ULONG  ulPlatformTDP;
-       ULONG  ulSmallACPlatformTDP;
-       ULONG  ulPlatformTDC;
-       ULONG  ulSmallACPlatformTDC;
-       ULONG  ulApuTDP;
-       ULONG  ulDGpuTDP;
-       ULONG  ulDGpuUlvPower;
-       ULONG  ulTjmax;
-} ATOM_Tonga_PPM_Table;
-
-typedef struct _ATOM_Tonga_Hard_Limit_Record {
-       ULONG  ulSCLKLimit;
-       ULONG  ulMCLKLimit;
-       USHORT  usVddcLimit;
-       USHORT  usVddciLimit;
-       USHORT  usVddgfxLimit;
-} ATOM_Tonga_Hard_Limit_Record;
-
-typedef struct _ATOM_Tonga_Hard_Limit_Table {
-       UCHAR ucRevId;
-       UCHAR ucNumEntries;
-       ATOM_Tonga_Hard_Limit_Record entries[1];
-} ATOM_Tonga_Hard_Limit_Table;
-
-typedef struct _ATOM_Tonga_GPIO_Table {
-       UCHAR  ucRevId;
-       UCHAR  ucVRHotTriggeredSclkDpmIndex;            /* If VRHot signal is triggered SCLK will be limited to this DPM level */
-       UCHAR  ucReserve[5];
-} ATOM_Tonga_GPIO_Table;
-
-typedef struct _PPTable_Generic_SubTable_Header {
-       UCHAR  ucRevId;
-} PPTable_Generic_SubTable_Header;
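
PPTable_Generic_SubTable_Header exists so a parser can read ucRevId first and only then commit to a concrete record layout; the processing code below uses exactly this dispatch for the SCLK-dependency and PCIE tables. Sketched (illustrative only):

    static void dispatch_pcie_table(const PPTable_Generic_SubTable_Header *hdr)
    {
        if (hdr->ucRevId < 1) {
            const ATOM_Tonga_PCIE_Table *t =
                (const ATOM_Tonga_PCIE_Table *)hdr;
            /* legacy layout: gen speed and lane width per entry */
            (void)t;
        } else {
            const ATOM_Polaris10_PCIE_Table *t =
                (const ATOM_Polaris10_PCIE_Table *)hdr;
            /* rev >= 1 additionally carries ulPCIE_Sclk per entry */
            (void)t;
        }
    }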
-
-
-#pragma pack(pop)
-
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
deleted file mode 100644 (file)
index a0ffd4a..0000000
+++ /dev/null
@@ -1,1213 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "tonga_processpptables.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "pp_debug.h"
-#include "hwmgr.h"
-#include "cgs_common.h"
-#include "tonga_pptable.h"
-
-/**
- * Private Function used during initialization.
- * @param hwmgr Pointer to the hardware manager.
- * @param setIt A flag indicating whether the capability should be set (TRUE) or reset (FALSE).
- * @param cap Which capability to set/reset.
- */
-static void set_hw_cap(struct pp_hwmgr *hwmgr, bool setIt, enum phm_platform_caps cap)
-{
-       if (setIt)
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap);
-       else
-               phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap);
-}
-
-
-/**
- * Private Function used during initialization.
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_caps the bit array (from BIOS) of capability bits.
- * @exception the current implementation always returns 0.
- */
-static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps)
-{
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE16____),
-               "ATOM_PP_PLATFORM_CAP_ASPM_L1 is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE64____),
-               "ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE512____),
-               "ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE1024____),
-               "ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 is not supported!", continue);
-       PP_ASSERT_WITH_CODE((~powerplay_caps & ____RETIRE2048____),
-               "ATOM_PP_PLATFORM_CAP_HTLINKCONTROL is not supported!", continue);
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_POWERPLAY),
-                       PHM_PlatformCaps_PowerPlaySupport
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_SBIOSPOWERSOURCE),
-                       PHM_PlatformCaps_BiosPowerSourceControl
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_HARDWAREDC),
-                       PHM_PlatformCaps_AutomaticDCTransition
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_MVDD_CONTROL),
-                       PHM_PlatformCaps_EnableMVDDControl
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDCI_CONTROL),
-                       PHM_PlatformCaps_ControlVDDCI
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_VDDGFX_CONTROL),
-                       PHM_PlatformCaps_ControlVDDGFX
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_BACO),
-                       PHM_PlatformCaps_BACO
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_CAP_DISABLE_VOLTAGE_ISLAND),
-                       PHM_PlatformCaps_DisableVoltageIsland
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL),
-                       PHM_PlatformCaps_CombinePCCWithThermalSignal
-                 );
-
-       set_hw_cap(
-                       hwmgr,
-                       0 != (powerplay_caps & ATOM_TONGA_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE),
-                       PHM_PlatformCaps_LoadPostProductionFirmware
-                 );
-
-       return 0;
-}
-
-/**
- * Private Function to get the PowerPlay Table Address.
- */
-const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
-{
-       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-
-       u16 size;
-       u8 frev, crev;
-       void *table_address = (void *)hwmgr->soft_pp_table;
-
-       if (!table_address) {
-               table_address = (ATOM_Tonga_POWERPLAYTABLE *)
-                               cgs_atom_get_data_table(hwmgr->device,
-                                               index, &size, &frev, &crev);
-               hwmgr->soft_pp_table = table_address;   /*Cache the result in RAM.*/
-               hwmgr->soft_pp_table_size = size;
-       }
-
-       return table_address;
-}
-
-static int get_vddc_lookup_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_voltage_lookup_table **lookup_table,
-               const ATOM_Tonga_Voltage_Lookup_Table   *vddc_lookup_pp_tables,
-               uint32_t        max_levels
-               )
-{
-       uint32_t table_size, i;
-       phm_ppt_v1_voltage_lookup_table *table;
-
-       PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
-               "Invalid CAC Leakage PowerPlay Table!", return 1);
-
-       table_size = sizeof(uint32_t) +
-               sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
-
-       table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       memset(table, 0x00, table_size);
-
-       table->count = vddc_lookup_pp_tables->ucNumEntries;
-
-       for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
-               table->entries[i].us_calculated = 0;
-               table->entries[i].us_vdd =
-                       vddc_lookup_pp_tables->entries[i].usVdd;
-               table->entries[i].us_cac_low =
-                       vddc_lookup_pp_tables->entries[i].usCACLow;
-               table->entries[i].us_cac_mid =
-                       vddc_lookup_pp_tables->entries[i].usCACMid;
-               table->entries[i].us_cac_high =
-                       vddc_lookup_pp_tables->entries[i].usCACHigh;
-       }
-
-       *lookup_table = table;
-
-       return 0;
-}
-
-/**
- * Private Function used during initialization.
- * Initialize Platform Power Management Parameter table
- * @param hwmgr Pointer to the hardware manager.
- * @param atom_ppm_table Pointer to PPM table in VBIOS
- */
-static int get_platform_power_management_table(
-               struct pp_hwmgr *hwmgr,
-               ATOM_Tonga_PPM_Table *atom_ppm_table)
-{
-       struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (NULL == ptr)
-               return -ENOMEM;
-
-       ptr->ppm_design
-               = atom_ppm_table->ucPpmDesign;
-       ptr->cpu_core_number
-               = atom_ppm_table->usCpuCoreNumber;
-       ptr->platform_tdp
-               = atom_ppm_table->ulPlatformTDP;
-       ptr->small_ac_platform_tdp
-               = atom_ppm_table->ulSmallACPlatformTDP;
-       ptr->platform_tdc
-               = atom_ppm_table->ulPlatformTDC;
-       ptr->small_ac_platform_tdc
-               = atom_ppm_table->ulSmallACPlatformTDC;
-       ptr->apu_tdp
-               = atom_ppm_table->ulApuTDP;
-       ptr->dgpu_tdp
-               = atom_ppm_table->ulDGpuTDP;
-       ptr->dgpu_ulv_power
-               = atom_ppm_table->ulDGpuUlvPower;
-       ptr->tj_max
-               = atom_ppm_table->ulTjmax;
-
-       pp_table_information->ppm_parameter_table = ptr;
-
-       return 0;
-}
-
-/**
- * Private Function used during initialization.
- * Initialize TDP limits for DPM2
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- */
-static int init_dpm_2_parameters(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       int result = 0;
-       struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       ATOM_Tonga_PPM_Table *atom_ppm_table;
-       uint32_t disable_ppm = 0;
-       uint32_t disable_power_control = 0;
-
-       pp_table_information->us_ulv_voltage_offset =
-               le16_to_cpu(powerplay_table->usUlvVoltageOffset);
-
-       pp_table_information->ppm_parameter_table = NULL;
-       pp_table_information->vddc_lookup_table = NULL;
-       pp_table_information->vddgfx_lookup_table = NULL;
-       /* TDP limits */
-       hwmgr->platform_descriptor.TDPODLimit =
-               le16_to_cpu(powerplay_table->usPowerControlLimit);
-       hwmgr->platform_descriptor.TDPAdjustment = 0;
-       hwmgr->platform_descriptor.VidAdjustment = 0;
-       hwmgr->platform_descriptor.VidAdjustmentPolarity = 0;
-       hwmgr->platform_descriptor.VidMinLimit = 0;
-       hwmgr->platform_descriptor.VidMaxLimit = 1500000;
-       hwmgr->platform_descriptor.VidStep = 6250;
-
-       disable_power_control = 0;
-       if (0 == disable_power_control) {
-               /* enable TDP overdrive (PowerControl) feature as well if supported */
-               if (hwmgr->platform_descriptor.TDPODLimit != 0)
-                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_PowerControl);
-       }
-
-       if (0 != powerplay_table->usVddcLookupTableOffset) {
-               const ATOM_Tonga_Voltage_Lookup_Table *pVddcCACTable =
-                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
-                       le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
-
-               result = get_vddc_lookup_table(hwmgr,
-                       &pp_table_information->vddc_lookup_table, pVddcCACTable, 16);
-       }
-
-       if (0 != powerplay_table->usVddgfxLookupTableOffset) {
-               const ATOM_Tonga_Voltage_Lookup_Table *pVddgfxCACTable =
-                       (ATOM_Tonga_Voltage_Lookup_Table *)(((unsigned long)powerplay_table) +
-                       le16_to_cpu(powerplay_table->usVddgfxLookupTableOffset));
-
-               result = get_vddc_lookup_table(hwmgr,
-                       &pp_table_information->vddgfx_lookup_table, pVddgfxCACTable, 16);
-       }
-
-       disable_ppm = 0;
-       if (0 == disable_ppm) {
-               atom_ppm_table = (ATOM_Tonga_PPM_Table *)
-                       (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
-
-               if (0 != powerplay_table->usPPMTableOffset) {
-                       if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
-                               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_EnablePlatformPowerManagement);
-                       }
-               }
-       }
-
-       return result;
-}
-
-static int get_valid_clk(
-               struct pp_hwmgr *hwmgr,
-               struct phm_clock_array **clk_table,
-               const phm_ppt_v1_clock_voltage_dependency_table  * clk_volt_pp_table
-               )
-{
-       uint32_t table_size, i;
-       struct phm_clock_array *table;
-
-       PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
-               "Invalid PowerPlay Table!", return -1);
-
-       table_size = sizeof(uint32_t) +
-               sizeof(uint32_t) * clk_volt_pp_table->count;
-
-       table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == table)
-               return -ENOMEM;
-
-       memset(table, 0x00, table_size);
-
-       table->count = (uint32_t)clk_volt_pp_table->count;
-
-       for (i = 0; i < table->count; i++)
-               table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
-
-       *clk_table = table;
-
-       return 0;
-}
-
-static int get_hard_limits(
-               struct pp_hwmgr *hwmgr,
-               struct phm_clock_and_voltage_limits *limits,
-               const ATOM_Tonga_Hard_Limit_Table * limitable
-               )
-{
-       PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
-
-       /* currently we always take entries[0] parameters */
-       limits->sclk = (uint32_t)limitable->entries[0].ulSCLKLimit;
-       limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit;
-       limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit;
-       limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit;
-       limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit;
-
-       return 0;
-}
-
-static int get_mclk_voltage_dependency_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
-               const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
-               )
-{
-       uint32_t table_size, i;
-       phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
-
-       PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
-
-       table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-               * mclk_dep_table->ucNumEntries;
-
-       mclk_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == mclk_table)
-               return -ENOMEM;
-
-       memset(mclk_table, 0x00, table_size);
-
-       mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
-
-       for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
-               mclk_table->entries[i].vddInd =
-                       mclk_dep_table->entries[i].ucVddcInd;
-               mclk_table->entries[i].vdd_offset =
-                       mclk_dep_table->entries[i].usVddgfxOffset;
-               mclk_table->entries[i].vddci =
-                       mclk_dep_table->entries[i].usVddci;
-               mclk_table->entries[i].mvdd =
-                       mclk_dep_table->entries[i].usMvdd;
-               mclk_table->entries[i].clk =
-                       mclk_dep_table->entries[i].ulMclk;
-       }
-
-       *pp_tonga_mclk_dep_table = mclk_table;
-
-       return 0;
-}
-
-static int get_sclk_voltage_dependency_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-               const PPTable_Generic_SubTable_Header *sclk_dep_table
-               )
-{
-       uint32_t table_size, i;
-       phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
-
-       if (sclk_dep_table->ucRevId < 1) {
-               const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
-                           (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
-
-               PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-                       * tonga_table->ucNumEntries;
-
-               sclk_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (NULL == sclk_table)
-                       return -ENOMEM;
-
-               memset(sclk_table, 0x00, table_size);
-
-               sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
-
-               for (i = 0; i < tonga_table->ucNumEntries; i++) {
-                       sclk_table->entries[i].vddInd =
-                               tonga_table->entries[i].ucVddInd;
-                       sclk_table->entries[i].vdd_offset =
-                               tonga_table->entries[i].usVddcOffset;
-                       sclk_table->entries[i].clk =
-                               tonga_table->entries[i].ulSclk;
-                       sclk_table->entries[i].cks_enable =
-                               (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-                       sclk_table->entries[i].cks_voffset =
-                               (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-               }
-       } else {
-               const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
-                           (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
-
-               PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-                       * polaris_table->ucNumEntries;
-
-               sclk_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (NULL == sclk_table)
-                       return -ENOMEM;
-
-               memset(sclk_table, 0x00, table_size);
-
-               sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
-
-               for (i = 0; i < polaris_table->ucNumEntries; i++) {
-                       sclk_table->entries[i].vddInd =
-                               polaris_table->entries[i].ucVddInd;
-                       sclk_table->entries[i].vdd_offset =
-                               polaris_table->entries[i].usVddcOffset;
-                       sclk_table->entries[i].clk =
-                               polaris_table->entries[i].ulSclk;
-                       sclk_table->entries[i].cks_enable =
-                               (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-                       sclk_table->entries[i].cks_voffset =
-                               (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-                       sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
-               }
-       }
-       *pp_tonga_sclk_dep_table = sclk_table;
-
-       return 0;
-}
-
-static int get_pcie_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
-               const PPTable_Generic_SubTable_Header * pTable
-               )
-{
-       uint32_t table_size, i, pcie_count;
-       phm_ppt_v1_pcie_table *pcie_table;
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       if (pTable->ucRevId < 1) {
-               const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
-               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) +
-                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
-
-               pcie_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (pcie_table == NULL)
-                       return -ENOMEM;
-
-               memset(pcie_table, 0x00, table_size);
-
-               /*
-               * Make sure the number of PCIe entries is less than or equal to the number of SCLK DPM levels.
-               * Since the first PCIe entry is for ULV, #pcie has to be <= SclkLevel + 1.
-               */
-               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
-               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
-                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
-               else
-                       printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
-                       Disregarding the excess entries... \n");
-
-               pcie_table->count = pcie_count;
-
-               for (i = 0; i < pcie_count; i++) {
-                       pcie_table->entries[i].gen_speed =
-                               atom_pcie_table->entries[i].ucPCIEGenSpeed;
-                       pcie_table->entries[i].lane_width =
-                               atom_pcie_table->entries[i].usPCIELaneWidth;
-               }
-
-               *pp_tonga_pcie_table = pcie_table;
-       } else {
-               /* Polaris10/Polaris11 and newer. */
-               const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
-               PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
-                       "Invalid PowerPlay Table!", return -1);
-
-               table_size = sizeof(uint32_t) +
-                       sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
-
-               pcie_table = kzalloc(table_size, GFP_KERNEL);
-
-               if (pcie_table == NULL)
-                       return -ENOMEM;
-
-               memset(pcie_table, 0x00, table_size);
-
-               /*
-               * Make sure the number of PCIe entries is less than or equal to the number of SCLK DPM levels.
-               * Since the first PCIe entry is for ULV, #pcie has to be <= SclkLevel + 1.
-               */
-               pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
-               if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
-                       pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
-               else
-                       printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
-                       Disregarding the excess entries... \n");
-
-               pcie_table->count = pcie_count;
-
-               for (i = 0; i < pcie_count; i++) {
-                       pcie_table->entries[i].gen_speed =
-                               atom_pcie_table->entries[i].ucPCIEGenSpeed;
-                       pcie_table->entries[i].lane_width =
-                               atom_pcie_table->entries[i].usPCIELaneWidth;
-                       pcie_table->entries[i].pcie_sclk =
-                               atom_pcie_table->entries[i].ulPCIE_Sclk;
-               }
-
-               *pp_tonga_pcie_table = pcie_table;
-       }
-
-       return 0;
-}
-
-static int get_cac_tdp_table(
-               struct pp_hwmgr *hwmgr,
-               struct phm_cac_tdp_table **cac_tdp_table,
-               const PPTable_Generic_SubTable_Header * table
-               )
-{
-       uint32_t table_size;
-       struct phm_cac_tdp_table *tdp_table;
-
-       table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
-       tdp_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == tdp_table)
-               return -ENOMEM;
-
-       memset(tdp_table, 0x00, table_size);
-
-       hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == hwmgr->dyn_state.cac_dtp_table) {
-               kfree(tdp_table);
-               return -ENOMEM;
-       }
-
-       memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
-
-       if (table->ucRevId < 3) {
-               const ATOM_Tonga_PowerTune_Table *tonga_table =
-                       (ATOM_Tonga_PowerTune_Table *)table;
-               tdp_table->usTDP = tonga_table->usTDP;
-               tdp_table->usConfigurableTDP =
-                       tonga_table->usConfigurableTDP;
-               tdp_table->usTDC = tonga_table->usTDC;
-               tdp_table->usBatteryPowerLimit =
-                       tonga_table->usBatteryPowerLimit;
-               tdp_table->usSmallPowerLimit =
-                       tonga_table->usSmallPowerLimit;
-               tdp_table->usLowCACLeakage =
-                       tonga_table->usLowCACLeakage;
-               tdp_table->usHighCACLeakage =
-                       tonga_table->usHighCACLeakage;
-               tdp_table->usMaximumPowerDeliveryLimit =
-                       tonga_table->usMaximumPowerDeliveryLimit;
-               tdp_table->usDefaultTargetOperatingTemp =
-                       tonga_table->usTjMax;
-               tdp_table->usTargetOperatingTemp =
-                       tonga_table->usTjMax; /*Set the initial temp to the same as default */
-               tdp_table->usPowerTuneDataSetID =
-                       tonga_table->usPowerTuneDataSetID;
-               tdp_table->usSoftwareShutdownTemp =
-                       tonga_table->usSoftwareShutdownTemp;
-               tdp_table->usClockStretchAmount =
-                       tonga_table->usClockStretchAmount;
-       } else {   /* Fiji and newer */
-               const ATOM_Fiji_PowerTune_Table *fijitable =
-                       (ATOM_Fiji_PowerTune_Table *)table;
-               tdp_table->usTDP = fijitable->usTDP;
-               tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP;
-               tdp_table->usTDC = fijitable->usTDC;
-               tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit;
-               tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit;
-               tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage;
-               tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage;
-               tdp_table->usMaximumPowerDeliveryLimit =
-                       fijitable->usMaximumPowerDeliveryLimit;
-               tdp_table->usDefaultTargetOperatingTemp =
-                       fijitable->usTjMax;
-               tdp_table->usTargetOperatingTemp =
-                       fijitable->usTjMax; /*Set the initial temp to the same as default */
-               tdp_table->usPowerTuneDataSetID =
-                       fijitable->usPowerTuneDataSetID;
-               tdp_table->usSoftwareShutdownTemp =
-                       fijitable->usSoftwareShutdownTemp;
-               tdp_table->usClockStretchAmount =
-                       fijitable->usClockStretchAmount;
-               tdp_table->usTemperatureLimitHotspot =
-                       fijitable->usTemperatureLimitHotspot;
-               tdp_table->usTemperatureLimitLiquid1 =
-                       fijitable->usTemperatureLimitLiquid1;
-               tdp_table->usTemperatureLimitLiquid2 =
-                       fijitable->usTemperatureLimitLiquid2;
-               tdp_table->usTemperatureLimitVrVddc =
-                       fijitable->usTemperatureLimitVrVddc;
-               tdp_table->usTemperatureLimitVrMvdd =
-                       fijitable->usTemperatureLimitVrMvdd;
-               tdp_table->usTemperatureLimitPlx =
-                       fijitable->usTemperatureLimitPlx;
-               tdp_table->ucLiquid1_I2C_address =
-                       fijitable->ucLiquid1_I2C_address;
-               tdp_table->ucLiquid2_I2C_address =
-                       fijitable->ucLiquid2_I2C_address;
-               tdp_table->ucLiquid_I2C_Line =
-                       fijitable->ucLiquid_I2C_Line;
-               tdp_table->ucVr_I2C_address = fijitable->ucVr_I2C_address;
-               tdp_table->ucVr_I2C_Line = fijitable->ucVr_I2C_Line;
-               tdp_table->ucPlx_I2C_address = fijitable->ucPlx_I2C_address;
-               tdp_table->ucPlx_I2C_Line = fijitable->ucPlx_I2C_Line;
-       }
-
-       *cac_tdp_table = tdp_table;
-
-       return 0;
-}
-
-static int get_mm_clock_voltage_table(
-               struct pp_hwmgr *hwmgr,
-               phm_ppt_v1_mm_clock_voltage_dependency_table **tonga_mm_table,
-               const ATOM_Tonga_MM_Dependency_Table * mm_dependency_table
-               )
-{
-       uint32_t table_size, i;
-       const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
-
-       PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
-       table_size = sizeof(uint32_t) +
-               sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
-               * mm_dependency_table->ucNumEntries;
-       mm_table = kzalloc(table_size, GFP_KERNEL);
-
-       if (NULL == mm_table)
-               return -ENOMEM;
-
-       memset(mm_table, 0x00, table_size);
-
-       mm_table->count = mm_dependency_table->ucNumEntries;
-
-       for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
-               mm_dependency_record = &mm_dependency_table->entries[i];
-               mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
-               mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
-               mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
-               mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
-               mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
-               mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
-               mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
-       }
-
-       *tonga_mm_table = mm_table;
-
-       return 0;
-}
-
-/**
- * Private Function used during initialization.
- * Initialize clock voltage dependency
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- */
-static int init_clock_voltage_dependency(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       int result = 0;
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       const ATOM_Tonga_MM_Dependency_Table *mm_dependency_table =
-               (const ATOM_Tonga_MM_Dependency_Table *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usMMDependencyTableOffset));
-       const PPTable_Generic_SubTable_Header *pPowerTuneTable =
-               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usPowerTuneTableOffset));
-       const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
-               (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-       const PPTable_Generic_SubTable_Header *sclk_dep_table =
-               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-       const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
-               (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usHardLimitTableOffset));
-       const PPTable_Generic_SubTable_Header *pcie_table =
-               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
-               le16_to_cpu(powerplay_table->usPCIETableOffset));
-
-       pp_table_information->vdd_dep_on_sclk = NULL;
-       pp_table_information->vdd_dep_on_mclk = NULL;
-       pp_table_information->mm_dep_table = NULL;
-       pp_table_information->pcie_table = NULL;
-
-       if (powerplay_table->usMMDependencyTableOffset != 0)
-               result = get_mm_clock_voltage_table(hwmgr,
-               &pp_table_information->mm_dep_table, mm_dependency_table);
-
-       if (result == 0 && powerplay_table->usPowerTuneTableOffset != 0)
-               result = get_cac_tdp_table(hwmgr,
-               &pp_table_information->cac_dtp_table, pPowerTuneTable);
-
-       if (result == 0 && powerplay_table->usSclkDependencyTableOffset != 0)
-               result = get_sclk_voltage_dependency_table(hwmgr,
-               &pp_table_information->vdd_dep_on_sclk, sclk_dep_table);
-
-       if (result == 0 && powerplay_table->usMclkDependencyTableOffset != 0)
-               result = get_mclk_voltage_dependency_table(hwmgr,
-               &pp_table_information->vdd_dep_on_mclk, mclk_dep_table);
-
-       if (result == 0 && powerplay_table->usPCIETableOffset != 0)
-               result = get_pcie_table(hwmgr,
-               &pp_table_information->pcie_table, pcie_table);
-
-       if (result == 0 && powerplay_table->usHardLimitTableOffset != 0)
-               result = get_hard_limits(hwmgr,
-               &pp_table_information->max_clock_voltage_on_dc, pHardLimits);
-
-       hwmgr->dyn_state.max_clock_voltage_on_dc.sclk =
-               pp_table_information->max_clock_voltage_on_dc.sclk;
-       hwmgr->dyn_state.max_clock_voltage_on_dc.mclk =
-               pp_table_information->max_clock_voltage_on_dc.mclk;
-       hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
-               pp_table_information->max_clock_voltage_on_dc.vddc;
-       hwmgr->dyn_state.max_clock_voltage_on_dc.vddci =
-               pp_table_information->max_clock_voltage_on_dc.vddci;
-
-       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_mclk)
-               && (0 != pp_table_information->vdd_dep_on_mclk->count))
-               result = get_valid_clk(hwmgr, &pp_table_information->valid_mclk_values,
-               pp_table_information->vdd_dep_on_mclk);
-
-       if (result == 0 && (NULL != pp_table_information->vdd_dep_on_sclk)
-               && (0 != pp_table_information->vdd_dep_on_sclk->count))
-               result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
-               pp_table_information->vdd_dep_on_sclk);
-
-       return result;
-}
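
Each subtable lookup above follows the same ATOM convention: the top-level header stores a 16-bit little-endian byte offset, relative to the start of the PowerPlay table, with zero meaning the subtable is absent. A hedged sketch of the pattern (helper name hypothetical):

/* Hypothetical helper capturing the cast-and-offset pattern above; a zero
 * offset means "absent", hence the offset != 0 guards on every call. */
static const void *pp_subtable(const ATOM_Tonga_POWERPLAYTABLE *table,
                               uint16_t le_offset)
{
        uint16_t off = le16_to_cpu(le_offset);

        return off ? (const uint8_t *)table + off : NULL;
}
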
-
-/** Retrieves the (signed) Overdrive limits from the VBIOS.
- * The maximum Overdrive engine clock and memory clock come from the
- * PowerPlay table itself.
- *
- * The information is placed into the platform descriptor.
- *
- * @param hwmgr source of the VBIOS table and owner of the platform descriptor to be updated.
- * @param powerplay_table the address of the PowerPlay table.
- *
- * @return Always 0.
- */
-static int init_over_drive_limits(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
-{
-       hwmgr->platform_descriptor.overdriveLimit.engineClock =
-               le32_to_cpu(powerplay_table->ulMaxODEngineClock);
-       hwmgr->platform_descriptor.overdriveLimit.memoryClock =
-               le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
-
-       hwmgr->platform_descriptor.minOverdriveVDDC = 0;
-       hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
-       hwmgr->platform_descriptor.overdriveVDDCStep = 0;
-
-       if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0
-               && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_ACOverdriveSupport);
-       }
-
-       return 0;
-}
-
-/**
- * Private function used during initialization.
- * Initialize the thermal controller and fan control parameters
- * from the PowerPlay table.
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- */
-static int init_thermal_controller(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       const PPTable_Generic_SubTable_Header *fan_table;
-       ATOM_Tonga_Thermal_Controller *thermal_controller;
-
-       thermal_controller = (ATOM_Tonga_Thermal_Controller *)
-               (((unsigned long)powerplay_table) +
-               le16_to_cpu(powerplay_table->usThermalControllerOffset));
-       PP_ASSERT_WITH_CODE((0 != powerplay_table->usThermalControllerOffset),
-               "Thermal controller table not set!", return -1);
-
-       hwmgr->thermal_controller.ucType = thermal_controller->ucType;
-       hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
-       hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress;
-
-       hwmgr->thermal_controller.fanInfo.bNoFan =
-               (0 != (thermal_controller->ucFanParameters & ATOM_TONGA_PP_FANPARAMETERS_NOFAN));
-
-       hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
-               thermal_controller->ucFanParameters &
-               ATOM_TONGA_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
-
-       hwmgr->thermal_controller.fanInfo.ulMinRPM
-               = thermal_controller->ucFanMinRPM * 100UL;
-       hwmgr->thermal_controller.fanInfo.ulMaxRPM
-               = thermal_controller->ucFanMaxRPM * 100UL;
-
-       set_hw_cap(
-                       hwmgr,
-                       ATOM_TONGA_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
-                       PHM_PlatformCaps_ThermalController
-                 );
-
-       if (0 == powerplay_table->usFanTableOffset)
-               return 0;
-
-       fan_table = (const PPTable_Generic_SubTable_Header *)
-               (((unsigned long)powerplay_table) +
-               le16_to_cpu(powerplay_table->usFanTableOffset));
-
-       PP_ASSERT_WITH_CODE((0 != powerplay_table->usFanTableOffset),
-               "Fan table not set!", return -1);
-       PP_ASSERT_WITH_CODE((0 < fan_table->ucRevId),
-               "Unsupported fan table format!", return -1);
-
-       hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
-               = 100000;
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-               PHM_PlatformCaps_MicrocodeFanControl);
-
-       if (fan_table->ucRevId < 8) {
-               const ATOM_Tonga_Fan_Table *tonga_fan_table =
-                       (ATOM_Tonga_Fan_Table *)fan_table;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
-                       = tonga_fan_table->ucTHyst;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
-                       = tonga_fan_table->usTMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
-                       = tonga_fan_table->usTMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
-                       = tonga_fan_table->usTHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
-                       = tonga_fan_table->usPWMMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
-                       = tonga_fan_table->usPWMMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
-                       = tonga_fan_table->usPWMHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
-                       = tonga_fan_table->usTMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
-                       = tonga_fan_table->ucFanControlMode;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
-                       = tonga_fan_table->usFanPWMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
-                       = 4836;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
-                       = tonga_fan_table->usFanOutputSensitivity;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
-                       = tonga_fan_table->usFanRPMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
-                       = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units; the SMC wants MHz. */
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
-                       = tonga_fan_table->ucTargetTemperature;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
-                       = tonga_fan_table->ucMinimumPWMLimit;
-       } else {
-               const ATOM_Fiji_Fan_Table *fiji_fan_table =
-                       (ATOM_Fiji_Fan_Table *)fan_table;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
-                       = fiji_fan_table->ucTHyst;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMin
-                       = fiji_fan_table->usTMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMed
-                       = fiji_fan_table->usTMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTHigh
-                       = fiji_fan_table->usTHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin
-                       = fiji_fan_table->usPWMMin;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed
-                       = fiji_fan_table->usPWMMed;
-               hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh
-                       = fiji_fan_table->usPWMHigh;
-               hwmgr->thermal_controller.advanceFanControlParameters.usTMax
-                       = fiji_fan_table->usTMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode
-                       = fiji_fan_table->ucFanControlMode;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM
-                       = fiji_fan_table->usFanPWMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity
-                       = 4836;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity
-                       = fiji_fan_table->usFanOutputSensitivity;
-               hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM
-                       = fiji_fan_table->usFanRPMMax;
-               hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit
-                       = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10 kHz units; the SMC wants MHz. */
-               hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature
-                       = fiji_fan_table->ucTargetTemperature;
-               hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit
-                       = fiji_fan_table->ucMinimumPWMLimit;
-
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge
-                       = fiji_fan_table->usFanGainEdge;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot
-                       = fiji_fan_table->usFanGainHotspot;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid
-                       = fiji_fan_table->usFanGainLiquid;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc
-                       = fiji_fan_table->usFanGainVrVddc;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd
-                       = fiji_fan_table->usFanGainVrMvdd;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx
-                       = fiji_fan_table->usFanGainPlx;
-               hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm
-                       = fiji_fan_table->usFanGainHbm;
-       }
-
-       return 0;
-}
-
-/**
- * Private function used during initialization.
- * Inspect the PowerPlay table for obvious signs of corruption.
- * @param hwmgr Pointer to the hardware manager.
- * @param powerplay_table Pointer to the PowerPlay Table.
- * @return 0 on success; -1 if the table shows signs of corruption.
- */
-static int check_powerplay_tables(
-               struct pp_hwmgr *hwmgr,
-               const ATOM_Tonga_POWERPLAYTABLE *powerplay_table
-               )
-{
-       const ATOM_Tonga_State_Array *state_arrays;
-
-       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)powerplay_table) +
-               le16_to_cpu(powerplay_table->usStateArrayOffset));
-
-       PP_ASSERT_WITH_CODE((ATOM_Tonga_TABLE_REVISION_TONGA <=
-               powerplay_table->sHeader.ucTableFormatRevision),
-               "Unsupported PPTable format!", return -1);
-       PP_ASSERT_WITH_CODE((0 != powerplay_table->usStateArrayOffset),
-               "State table is not set!", return -1);
-       PP_ASSERT_WITH_CODE((0 < powerplay_table->sHeader.usStructureSize),
-               "Invalid PowerPlay Table!", return -1);
-       PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
-
-       return 0;
-}
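
PP_ASSERT_WITH_CODE() evaluates a condition, logs the message when the condition fails, and then executes its third argument, here a return statement. Roughly (a sketch only; the actual definition lives in pp_debug.h):

#define PP_ASSERT_WITH_CODE(cond, msg, code)            \
        do {                                            \
                if (!(cond)) {                          \
                        printk("%s\n", msg);            \
                        code;      /* e.g. return -1 */ \
                }                                       \
        } while (0)
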
-
-int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
-{
-       int result = 0;
-       const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
-
-       hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v1_information), GFP_KERNEL);
-
-       PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
-                           "Failed to allocate hwmgr->pptable!", return -ENOMEM);
-
-       memset(hwmgr->pptable, 0x00, sizeof(struct phm_ppt_v1_information));
-
-       powerplay_table = get_powerplay_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((NULL != powerplay_table),
-               "Missing PowerPlay Table!", return -1);
-
-       result = check_powerplay_tables(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "check_powerplay_tables failed", return result);
-
-       result = set_platform_caps(hwmgr,
-                                  le32_to_cpu(powerplay_table->ulPlatformCaps));
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "set_platform_caps failed", return result);
-
-       result = init_thermal_controller(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_thermal_controller failed", return result);
-
-       result = init_over_drive_limits(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_over_drive_limits failed", return result);
-
-       result = init_clock_voltage_dependency(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_clock_voltage_dependency failed", return result);
-
-       result = init_dpm_2_parameters(hwmgr, powerplay_table);
-
-       PP_ASSERT_WITH_CODE((result == 0),
-                           "init_dpm_2_parameters failed", return result);
-
-       return result;
-}
-
-int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
-{
-       struct phm_ppt_v1_information *pp_table_information =
-               (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-       kfree(pp_table_information->vdd_dep_on_sclk);
-       pp_table_information->vdd_dep_on_sclk = NULL;
-
-       kfree(pp_table_information->vdd_dep_on_mclk);
-       pp_table_information->vdd_dep_on_mclk = NULL;
-
-       kfree(pp_table_information->valid_mclk_values);
-       pp_table_information->valid_mclk_values = NULL;
-
-       kfree(pp_table_information->valid_sclk_values);
-       pp_table_information->valid_sclk_values = NULL;
-
-       kfree(pp_table_information->vddc_lookup_table);
-       pp_table_information->vddc_lookup_table = NULL;
-
-       kfree(pp_table_information->vddgfx_lookup_table);
-       pp_table_information->vddgfx_lookup_table = NULL;
-
-       kfree(pp_table_information->mm_dep_table);
-       pp_table_information->mm_dep_table = NULL;
-
-       kfree(pp_table_information->cac_dtp_table);
-       pp_table_information->cac_dtp_table = NULL;
-
-       kfree(hwmgr->dyn_state.cac_dtp_table);
-       hwmgr->dyn_state.cac_dtp_table = NULL;
-
-       kfree(pp_table_information->ppm_parameter_table);
-       pp_table_information->ppm_parameter_table = NULL;
-
-       kfree(pp_table_information->pcie_table);
-       pp_table_information->pcie_table = NULL;
-
-       kfree(hwmgr->pptable);
-       hwmgr->pptable = NULL;
-
-       return 0;
-}
-
-const struct pp_table_func tonga_pptable_funcs = {
-       .pptable_init = tonga_pp_tables_initialize,
-       .pptable_fini = tonga_pp_tables_uninitialize,
-};
-
-int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
-{
-       const ATOM_Tonga_State_Array * state_arrays;
-       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((NULL != pp_table),
-                       "Missing PowerPlay Table!", return -1);
-       PP_ASSERT_WITH_CODE((pp_table->sHeader.ucTableFormatRevision >=
-                       ATOM_Tonga_TABLE_REVISION_TONGA),
-                       "Incorrect PowerPlay table revision!", return -1);
-
-       state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
-                       le16_to_cpu(pp_table->usStateArrayOffset));
-
-       return (uint32_t)(state_arrays->ucNumEntries);
-}
-
-/**
-* Private function to convert flags stored in the BIOS to software flags in PowerPlay.
-*/
-static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
-               uint16_t classification, uint16_t classification2)
-{
-       uint32_t result = 0;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT)
-               result |= PP_StateClassificationFlag_Boot;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL)
-               result |= PP_StateClassificationFlag_Thermal;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
-               result |= PP_StateClassificationFlag_LimitedPowerSource;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_REST)
-               result |= PP_StateClassificationFlag_Rest;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED)
-               result |= PP_StateClassificationFlag_Forced;
-
-       if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI)
-               result |= PP_StateClassificationFlag_ACPI;
-
-       if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
-               result |= PP_StateClassificationFlag_LimitedPowerSource_2;
-
-       return result;
-}
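
The chain of bit tests above is a one-to-one translation from ATOM classification bits to PowerPlay state flags. An equivalent table-driven form (a sketch, not the driver's code; classification2 would need its own small table) makes the mapping explicit:

static const struct {
        uint16_t atom;  /* ATOM_PPLIB_CLASSIFICATION_* bit  */
        uint32_t pp;    /* PP_StateClassificationFlag_* bit */
} class_map[] = {
        { ATOM_PPLIB_CLASSIFICATION_BOOT,    PP_StateClassificationFlag_Boot },
        { ATOM_PPLIB_CLASSIFICATION_THERMAL, PP_StateClassificationFlag_Thermal },
        /* ... remaining pairs exactly as in the function above ... */
};

static uint32_t classification_to_flags(uint16_t classification)
{
        uint32_t flags = 0;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(class_map); i++)
                if (classification & class_map[i].atom)
                        flags |= class_map[i].pp;

        return flags;
}
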
-
-/**
-* Create a Power State out of an entry in the PowerPlay table.
-* This function is called by the hardware back-end.
-* @param hwmgr Pointer to the hardware manager.
-* @param entry_index The index of the entry to be extracted from the table.
-* @param power_state The address of the PowerState instance being created.
-* @return -1 if the entry cannot be retrieved.
-*/
-int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
-               uint32_t entry_index, struct pp_power_state *power_state,
-               int (*call_back_func)(struct pp_hwmgr *, void *,
-                               struct pp_power_state *, void *, uint32_t))
-{
-       int result = 0;
-       const ATOM_Tonga_State_Array * state_arrays;
-       const ATOM_Tonga_State *state_entry;
-       const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
-
-       PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1);
-       power_state->classification.bios_index = entry_index;
-
-       if (pp_table->sHeader.ucTableFormatRevision >=
-                       ATOM_Tonga_TABLE_REVISION_TONGA) {
-               state_arrays = (ATOM_Tonga_State_Array *)(((unsigned long)pp_table) +
-                               le16_to_cpu(pp_table->usStateArrayOffset));
-
-               PP_ASSERT_WITH_CODE((0 < pp_table->usStateArrayOffset),
-                               "Invalid PowerPlay Table State Array Offset.", return -1);
-               PP_ASSERT_WITH_CODE((0 < state_arrays->ucNumEntries),
-                               "Invalid PowerPlay Table State Array.", return -1);
-               PP_ASSERT_WITH_CODE((entry_index < state_arrays->ucNumEntries),
-                               "Invalid PowerPlay Table State Array Entry.", return -1);
-
-               state_entry = &(state_arrays->states[entry_index]);
-
-               result = call_back_func(hwmgr, (void *)state_entry, power_state,
-                               (void *)pp_table,
-                               make_classification_flags(hwmgr,
-                                       le16_to_cpu(state_entry->usClassification),
-                                       le16_to_cpu(state_entry->usClassification2)));
-       }
-
-       if (!result && (power_state->classification.flags &
-                       PP_StateClassificationFlag_Boot))
-               result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
-
-       return result;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
deleted file mode 100644 (file)
index d24b888..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
-
-#include "hwmgr.h"
-
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
-               struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
-                               struct pp_power_state *, void *, uint32_t));
-
-#endif
-
index 3f8172f..18f39e8 100644 (file)
@@ -131,9 +131,6 @@ struct amd_pp_init {
        struct cgs_device *device;
        uint32_t chip_family;
        uint32_t chip_id;
-       uint32_t rev_id;
-       uint16_t sub_sys_id;
-       uint16_t sub_vendor_id;
 };
 
 enum amd_pp_display_config_type{
index 962cb53..d449583 100644 (file)
@@ -341,7 +341,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
 extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
 extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
 extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
 extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
 extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
 extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
index 36b4ec9..e987483 100644 (file)
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
 #include "hwmgr_ppt.h"
+#include "power_state.h"
 
 struct pp_instance;
 struct pp_hwmgr;
-struct pp_hw_power_state;
-struct pp_power_state;
-struct PP_VCEState;
 struct phm_fan_speed_info;
 struct pp_atomctrl_voltage_table;
 
-
 extern int amdgpu_powercontainment;
 extern int amdgpu_sclk_deep_sleep_en;
+extern unsigned amdgpu_pp_feature_mask;
+
+#define VOLTAGE_SCALE 4
+
+uint8_t convert_to_vid(uint16_t vddc);
 
 enum DISPLAY_GAP {
        DISPLAY_GAP_VBLANK_OR_WM = 0,   /* Wait for vblank or MCHG watermark. */
@@ -52,7 +54,6 @@ enum DISPLAY_GAP {
 };
 typedef enum DISPLAY_GAP DISPLAY_GAP;
 
-
 struct vi_dpm_level {
        bool enabled;
        uint32_t value;
@@ -74,6 +75,19 @@ enum PP_Result {
 #define PCIE_PERF_REQ_GEN2         3
 #define PCIE_PERF_REQ_GEN3         4
 
+enum PP_FEATURE_MASK {
+       PP_SCLK_DPM_MASK = 0x1,
+       PP_MCLK_DPM_MASK = 0x2,
+       PP_PCIE_DPM_MASK = 0x4,
+       PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+       PP_POWER_CONTAINMENT_MASK = 0x10,
+       PP_UVD_HANDSHAKE_MASK = 0x20,
+       PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+       PP_VBI_TIME_SUPPORT_MASK = 0x80,
+       PP_ULV_MASK = 0x100,
+       PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+};
+
 enum PHM_BackEnd_Magic {
        PHM_Dummy_Magic       = 0xAA5555AA,
        PHM_RV770_Magic       = 0xDCBAABCD,
@@ -354,7 +368,7 @@ struct pp_table_func {
        int (*pptable_get_vce_state_table_entry)(
                                                struct pp_hwmgr *hwmgr,
                                                unsigned long i,
-                                               struct PP_VCEState *vce_state,
+                                               struct pp_vce_state *vce_state,
                                                void **clock_info,
                                                unsigned long *flag);
 };
@@ -573,22 +587,43 @@ struct phm_microcode_version_info {
        uint32_t NB;
 };
 
+#define PP_MAX_VCE_LEVELS 6
+
+enum PP_VCE_LEVEL {
+       PP_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+       PP_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+       PP_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+       PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+       PP_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+       PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+
+enum PP_TABLE_VERSION {
+       PP_TABLE_V0 = 0,
+       PP_TABLE_V1,
+       PP_TABLE_V2,
+       PP_TABLE_MAX
+};
+
 /**
  * The main hardware manager structure.
  */
 struct pp_hwmgr {
        uint32_t chip_family;
        uint32_t chip_id;
-       uint32_t hw_revision;
-       uint32_t sub_sys_id;
-       uint32_t sub_vendor_id;
 
+       uint32_t pp_table_version;
        void *device;
        struct pp_smumgr *smumgr;
        const void *soft_pp_table;
        uint32_t soft_pp_table_size;
        void *hardcode_pp_table;
        bool need_pp_table_upload;
+
+       struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+       uint32_t num_vce_state_tables;
+
        enum amd_dpm_forced_level dpm_level;
        bool block_hw_access;
        struct phm_gfx_arbiter gfx_arbiter;
@@ -626,6 +661,7 @@ struct pp_hwmgr {
        struct pp_power_state    *boot_ps;
        struct pp_power_state    *uvd_ps;
        struct amd_pp_display_configuration display_config;
+       uint32_t feature_mask;
 };
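
The new feature_mask field pairs with the PP_FEATURE_MASK bits and the amdgpu_pp_feature_mask module parameter declared earlier in this header. A minimal sketch of the intended gating, assuming feature_mask is seeded from the module parameter when the hwmgr is created:

/* Sketch; assumes hwmgr->feature_mask was seeded from
 * amdgpu_pp_feature_mask during hwmgr init. */
static inline bool pp_feature_on(const struct pp_hwmgr *hwmgr,
                                 enum PP_FEATURE_MASK bit)
{
        return (hwmgr->feature_mask & bit) != 0;
}

/* e.g. if (pp_feature_on(hwmgr, PP_SCLK_DEEP_SLEEP_MASK)) ... */
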
 
 
@@ -661,6 +697,8 @@ extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, st
 extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
 extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
 extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+               uint32_t voltage);
 extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
 extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
 extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
@@ -671,6 +709,9 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
 extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
 extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
 
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+                               uint32_t sclk, uint16_t id, uint16_t *voltage);
+
 #define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
 
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
@@ -685,8 +726,6 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
         PHM_FIELD_SHIFT(reg, field))
 
 
-
-
 /* Operations on named fields. */
 
 #define PHM_READ_FIELD(device, reg, field)     \
index a3f0ce4..9ceaed9 100644 (file)
@@ -158,7 +158,7 @@ struct pp_power_state {
 
 
 /*Structure to hold a VCE state entry*/
-struct PP_VCEState {
+struct pp_vce_state {
        uint32_t evclk;
        uint32_t ecclk;
        uint32_t sclk;
@@ -171,30 +171,28 @@ enum PP_MMProfilingState {
        PP_MMProfilingState_Stopped
 };
 
-struct PP_Clock_Engine_Request {
-       unsigned long clientType;
-       unsigned long ctxid;
+struct pp_clock_engine_request {
+       unsigned long client_type;
+       unsigned long ctx_id;
        uint64_t  context_handle;
        unsigned long sclk;
-       unsigned long sclkHardMin;
+       unsigned long sclk_hard_min;
        unsigned long mclk;
        unsigned long iclk;
        unsigned long evclk;
        unsigned long ecclk;
-       unsigned long ecclkHardMin;
+       unsigned long ecclk_hard_min;
        unsigned long vclk;
        unsigned long dclk;
-       unsigned long samclk;
-       unsigned long acpclk;
-       unsigned long sclkOverdrive;
-       unsigned long mclkOverdrive;
+       unsigned long sclk_over_drive;
+       unsigned long mclk_over_drive;
        unsigned long sclk_threshold;
        unsigned long flag;
        unsigned long vclk_ceiling;
        unsigned long dclk_ceiling;
        unsigned long num_cus;
-       unsigned long pmflag;
-       enum PP_MMProfilingState MMProfilingState;
+       unsigned long pm_flag;
+       enum PP_MMProfilingState mm_profiling_state;
 };
 
 #endif
index d7d83b7..bfdbec1 100644 (file)
@@ -43,5 +43,8 @@
        } while (0)
 
 
+#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n)   \
+       (type *)((char *)&(ptr)->member + (sizeof(type) * (n)))
+
 #endif /* PP_DEBUG_H */
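
The new GET_FLEXIBLE_ARRAY_MEMBER_ADDR() helper computes the address of the n-th record after a table header from an explicit record type, which is useful when the declared element type of a flexible array does not match the actual record stride. A hypothetical use (types illustrative only):

const ATOM_Tonga_State *entry =
        GET_FLEXIBLE_ARRAY_MEMBER_ADDR(ATOM_Tonga_State, states,
                                       state_arrays, i);
/* equivalent to &state_arrays->states[i] when the stride matches */
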
 
index 3c235f0..34abfd2 100644 (file)
@@ -74,7 +74,6 @@ struct pp_smumgr_func {
 struct pp_smumgr {
        uint32_t chip_family;
        uint32_t chip_id;
-       uint32_t hw_revision;
        void *device;
        void *backend;
        uint32_t usec_timeout;
@@ -122,6 +121,12 @@ extern int smu_allocate_memory(void *device, uint32_t size,
 
 extern int smu_free_memory(void *device, void *handle);
 
+extern int cz_smum_init(struct pp_smumgr *smumgr);
+extern int iceland_smum_init(struct pp_smumgr *smumgr);
+extern int tonga_smum_init(struct pp_smumgr *smumgr);
+extern int fiji_smum_init(struct pp_smumgr *smumgr);
+extern int polaris10_smum_init(struct pp_smumgr *smumgr);
+
 #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 
 #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
index 87c023e..5a44485 100644 (file)
@@ -89,13 +89,8 @@ static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
        if (result != 0)
                return result;
 
-       result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+       return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
                                        SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-
-       if (result != 0)
-               return result;
-
-       return 0;
 }
 
 static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
@@ -106,12 +101,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
 
        if (0 != (3 & smc_address)) {
                printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
-               return -1;
+               return -EINVAL;
        }
 
        if (limit <= (smc_address + 3)) {
                printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
-               return -1;
+               return -EINVAL;
        }
 
        cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
@@ -129,9 +124,10 @@ static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
                return -EINVAL;
 
        result = cz_set_smc_sram_address(smumgr, smc_address, limit);
-       cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+       if (!result)
+               cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
 
-       return 0;
+       return result;
 }
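
cz_write_smc_sram_dword() previously discarded the result of cz_set_smc_sram_address() and returned 0 unconditionally; the change gates the data write on successful address setup and propagates the setup error. The general shape of the fix (function names generic):

result = setup();       /* may fail                    */
if (!result)
        do_write();     /* side effect only on success */
return result;          /* caller sees the real error  */
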
 
 static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
@@ -148,7 +144,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
 static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
 {
        struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
-       int result = 0;
        uint32_t smc_address;
 
        if (!smumgr->reload_fw) {
@@ -177,11 +172,9 @@ static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
        cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
                                cz_smu->toc_entry_power_profiling_index);
 
-       result = cz_send_msg_to_smc_with_parameter(smumgr,
+       return cz_send_msg_to_smc_with_parameter(smumgr,
                                        PPSMC_MSG_ExecuteJob,
                                        cz_smu->toc_entry_initialize_index);
-
-       return result;
 }
 
 static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
@@ -195,9 +188,6 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
        if (smumgr == NULL || smumgr->device == NULL)
                return -EINVAL;
 
-       return cgs_read_register(smumgr->device,
-                                       mmSMU_MP1_SRBM2P_ARG_0);
-
        cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
 
        for (i = 0; i < smumgr->usec_timeout; i++) {
@@ -275,7 +265,10 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
        if (smumgr->chip_id == CHIP_STONEY)
                fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
 
-       cz_request_smu_load_fw(smumgr);
+       ret = cz_request_smu_load_fw(smumgr);
+       if (ret)
+               printk(KERN_ERR "[ powerplay ] SMU firmware load failed\n");
+
        cz_check_fw_load_finish(smumgr, fw_to_check);
 
        ret = cz_load_mec_firmware(smumgr);
@@ -566,10 +559,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
 
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-       if (smumgr->chip_id == CHIP_STONEY)
-               cz_smu_populate_single_ucode_load_task(smumgr,
-                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-       else
+       if (smumgr->chip_id != CHIP_STONEY)
                cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
@@ -580,10 +570,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       if (smumgr->chip_id == CHIP_STONEY)
-               cz_smu_populate_single_ucode_load_task(smumgr,
-                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       else
+       if (smumgr->chip_id != CHIP_STONEY)
                cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
@@ -610,19 +597,12 @@ static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
        struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
 
        cz_smu->toc_entry_used_count = 0;
-
        cz_smu_initialize_toc_empty_job_list(smumgr);
-
        cz_smu_construct_toc_for_rlc_aram_save(smumgr);
-
        cz_smu_construct_toc_for_vddgfx_enter(smumgr);
-
        cz_smu_construct_toc_for_vddgfx_exit(smumgr);
-
        cz_smu_construct_toc_for_power_profiling(smumgr);
-
        cz_smu_construct_toc_for_bootup(smumgr);
-
        cz_smu_construct_toc_for_clock_table(smumgr);
 
        return 0;
index 704ff4c..8047ad2 100644 (file)
@@ -40,7 +40,6 @@
 #include "cgs_common.h"
 
 #define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
 
 /* Microcode file is stored in this buffer */
 #define BUFFER_SIZE                 80000
index e5377ae..7c2445f 100644 (file)
 
 #include <polaris10_ppsmc.h>
 #include <pp_endian.h>
+#include "smu74.h"
 
 struct polaris10_avfs {
        enum AVFS_BTC_STATUS avfs_btc_status;
        uint32_t           avfs_btc_param;
 };
 
+struct polaris10_pt_defaults {
+       uint8_t   SviLoadLineEn;
+       uint8_t   SviLoadLineVddC;
+       uint8_t   TDC_VDDC_ThrottleReleaseLimitPerc;
+       uint8_t   TDC_MAWt;
+       uint8_t   TdcWaterfallCtl;
+       uint8_t   DTEAmbientTempBase;
+
+       uint32_t  DisplayCac;
+       uint32_t  BAPM_TEMP_GRADIENT;
+       uint16_t  BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+       uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
 struct polaris10_buffer_entry {
        uint32_t data_size;
        uint32_t mc_addr_low;
@@ -40,6 +55,11 @@ struct polaris10_buffer_entry {
        unsigned long  handle;
 };
 
+struct polaris10_range_table {
+       uint32_t trans_lower_frequency; /* in 10khz */
+       uint32_t trans_upper_frequency;
+};
+
 struct polaris10_smumgr {
        uint8_t *header;
        uint8_t *mec_image;
index cf3cabe..bbeb786 100644 (file)
 #include "smumgr.h"
 #include "cgs_common.h"
 #include "linux/delay.h"
-#include "cz_smumgr.h"
-#include "tonga_smumgr.h"
-#include "iceland_smumgr.h"
-#include "fiji_smumgr.h"
-#include "polaris10_smumgr.h"
+
 
 int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 {
@@ -48,7 +44,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        smumgr->device = pp_init->device;
        smumgr->chip_family = pp_init->chip_family;
        smumgr->chip_id = pp_init->chip_id;
-       smumgr->hw_revision = pp_init->rev_id;
        smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
        smumgr->reload_fw = 1;
        handle->smu_mgr = smumgr;
index 6d4ff34..28e6471 100644 (file)
@@ -198,8 +198,8 @@ static int arcpgu_probe(struct platform_device *pdev)
        int ret;
 
        drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        ret = arcpgu_load(drm);
        if (ret)
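
drm_dev_alloc() now reports failure as an ERR_PTR-encoded errno rather than NULL, so this caller, and the hdlcd, malidp, and atmel-hlcdc hunks below, trade their NULL checks for the IS_ERR()/PTR_ERR() pair:

drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
if (IS_ERR(drm))                /* e.g. ERR_PTR(-ENOMEM)     */
        return PTR_ERR(drm);    /* forward the encoded errno */
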
index d83b46a..fb6a418 100644 (file)
@@ -326,8 +326,8 @@ static int hdlcd_drm_bind(struct device *dev)
                return -ENOMEM;
 
        drm = drm_dev_alloc(&hdlcd_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        drm->dev_private = hdlcd;
        dev_set_drvdata(dev, drm);
index c383d72..9280358 100644 (file)
@@ -311,8 +311,8 @@ static int malidp_bind(struct device *dev)
                return ret;
 
        drm = drm_dev_alloc(&malidp_driver, dev);
-       if (!drm) {
-               ret = -ENOMEM;
+       if (IS_ERR(drm)) {
+               ret = PTR_ERR(drm);
                goto alloc_fail;
        }
 
index b29a412..608df4c 100644 (file)
@@ -150,7 +150,8 @@ static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct ast_bo *astbo = ast_bo(bo);
 
-       return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&astbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
index 8e7483d..5f48431 100644 (file)
@@ -797,8 +797,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
        int ret;
 
        ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev);
-       if (!ddev)
-               return -ENOMEM;
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
 
        ret = atmel_hlcdc_dc_load(ddev);
        if (ret)
index 207a2cb..0b4e5d1 100644 (file)
@@ -178,7 +178,7 @@ static void bochs_encoder_init(struct drm_device *dev)
 }
 
 
-int bochs_connector_get_modes(struct drm_connector *connector)
+static int bochs_connector_get_modes(struct drm_connector *connector)
 {
        int count;
 
index 5c5638a..269cfca 100644 (file)
@@ -128,7 +128,8 @@ static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
 {
        struct bochs_bo *bochsbo = bochs_bo(bo);
 
-       return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&bochsbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
index ec8fb2e..8ed3906 100644 (file)
@@ -922,15 +922,13 @@ static int adv7511_parse_dt(struct device_node *np,
        return 0;
 }
 
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
 static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 {
        struct adv7511_link_config link_config;
        struct adv7511 *adv7511;
        struct device *dev = &i2c->dev;
+       unsigned int main_i2c_addr = i2c->addr << 1;
+       unsigned int edid_i2c_addr = main_i2c_addr + 4;
        unsigned int val;
        int ret;
 
@@ -991,8 +989,10 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 
        regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
        regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
-                    packet_i2c_addr);
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+                    main_i2c_addr - 0xa);
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
+                    main_i2c_addr - 2);
+
        adv7511_packet_disable(adv7511, 0xffff);
 
        adv7511->i2c_main = i2c;
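
The fixed secondary addresses are replaced with offsets from the main I2C address: in 8-bit form (the 7-bit address shifted left by one) the EDID map sits at +4, the packet memory at -0xa, and the CEC map at -2. For a 7-bit main address of 0x3d this reproduces the old constants exactly; the adv7533 hunk below makes the matching change, creating its CEC dummy at the 7-bit main address minus 1 (0x78 >> 1). Worked example:

unsigned int main_8bit = 0x3d << 1;       /* 0x7a                      */
unsigned int edid_addr = main_8bit + 4;   /* 0x7e, old edid_i2c_addr   */
unsigned int pkt_addr  = main_8bit - 0xa; /* 0x70, old packet_i2c_addr */
unsigned int cec_addr  = main_8bit - 2;   /* 0x78, old cec_i2c_addr    */
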
index 5eebd15..d7f7b7c 100644 (file)
@@ -149,13 +149,12 @@ void adv7533_uninit_cec(struct adv7511 *adv)
        i2c_unregister_device(adv->i2c_cec);
 }
 
-static const int cec_i2c_addr = 0x78;
-
 int adv7533_init_cec(struct adv7511 *adv)
 {
        int ret;
 
-       adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+       adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
+                                    adv->i2c_main->addr - 1);
        if (!adv->i2c_cec)
                return -ENOMEM;
 
index efac8ab..0f2e423 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/bridge/analogix_dp.h>
 
 #include "analogix_dp_core.h"
+#include "analogix_dp_reg.h"
 
 #define to_dp(nm)      container_of(nm, struct analogix_dp_device, nm)
 
@@ -147,7 +148,7 @@ static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
 {
        unsigned char psr_version;
 
-       analogix_dp_read_byte_from_dpcd(dp, DP_PSR_SUPPORT, &psr_version);
+       drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
        dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
 
        return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
@@ -158,166 +159,37 @@ static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
        unsigned char psr_en;
 
        /* Disable psr function */
-       analogix_dp_read_byte_from_dpcd(dp, DP_PSR_EN_CFG, &psr_en);
+       drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
        psr_en &= ~DP_PSR_ENABLE;
-       analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en);
+       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
 
        /* Main-Link transmitter remains active during PSR active states */
        psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
-       analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en);
+       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
 
        /* Enable psr function */
        psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
                 DP_PSR_CRC_VERIFICATION;
-       analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en);
+       drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
 
        analogix_dp_enable_psr_crc(dp);
 }
 
-static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data)
-{
-       int i;
-       unsigned char sum = 0;
-
-       for (i = 0; i < EDID_BLOCK_LENGTH; i++)
-               sum = sum + edid_data[i];
-
-       return sum;
-}
-
-static int analogix_dp_read_edid(struct analogix_dp_device *dp)
-{
-       unsigned char *edid = dp->edid;
-       unsigned int extend_block = 0;
-       unsigned char sum;
-       unsigned char test_vector;
-       int retval;
-
-       /*
-        * EDID device address is 0x50.
-        * However, if necessary, you must have set upper address
-        * into E-EDID in I2C device, 0x30.
-        */
-
-       /* Read Extension Flag, Number of 128-byte EDID extension blocks */
-       retval = analogix_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
-                                               EDID_EXTENSION_FLAG,
-                                               &extend_block);
-       if (retval)
-               return retval;
-
-       if (extend_block > 0) {
-               dev_dbg(dp->dev, "EDID data includes a single extension!\n");
-
-               /* Read EDID data */
-               retval = analogix_dp_read_bytes_from_i2c(dp,
-                                               I2C_EDID_DEVICE_ADDR,
-                                               EDID_HEADER_PATTERN,
-                                               EDID_BLOCK_LENGTH,
-                                               &edid[EDID_HEADER_PATTERN]);
-               if (retval != 0) {
-                       dev_err(dp->dev, "EDID Read failed!\n");
-                       return -EIO;
-               }
-               sum = analogix_dp_calc_edid_check_sum(edid);
-               if (sum != 0) {
-                       dev_err(dp->dev, "EDID bad checksum!\n");
-                       return -EIO;
-               }
-
-               /* Read additional EDID data */
-               retval = analogix_dp_read_bytes_from_i2c(dp,
-                               I2C_EDID_DEVICE_ADDR,
-                               EDID_BLOCK_LENGTH,
-                               EDID_BLOCK_LENGTH,
-                               &edid[EDID_BLOCK_LENGTH]);
-               if (retval != 0) {
-                       dev_err(dp->dev, "EDID Read failed!\n");
-                       return -EIO;
-               }
-               sum = analogix_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
-               if (sum != 0) {
-                       dev_err(dp->dev, "EDID bad checksum!\n");
-                       return -EIO;
-               }
-
-               analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
-                                               &test_vector);
-               if (test_vector & DP_TEST_LINK_EDID_READ) {
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_EDID_CHECKSUM,
-                               edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_RESPONSE,
-                               DP_TEST_EDID_CHECKSUM_WRITE);
-               }
-       } else {
-               dev_info(dp->dev, "EDID data does not include any extensions.\n");
-
-               /* Read EDID data */
-               retval = analogix_dp_read_bytes_from_i2c(dp,
-                               I2C_EDID_DEVICE_ADDR, EDID_HEADER_PATTERN,
-                               EDID_BLOCK_LENGTH, &edid[EDID_HEADER_PATTERN]);
-               if (retval != 0) {
-                       dev_err(dp->dev, "EDID Read failed!\n");
-                       return -EIO;
-               }
-               sum = analogix_dp_calc_edid_check_sum(edid);
-               if (sum != 0) {
-                       dev_err(dp->dev, "EDID bad checksum!\n");
-                       return -EIO;
-               }
-
-               analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
-                                               &test_vector);
-               if (test_vector & DP_TEST_LINK_EDID_READ) {
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_EDID_CHECKSUM, edid[EDID_CHECKSUM]);
-                       analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE);
-               }
-       }
-
-       dev_dbg(dp->dev, "EDID Read success!\n");
-       return 0;
-}
-
-static int analogix_dp_handle_edid(struct analogix_dp_device *dp)
-{
-       u8 buf[12];
-       int i;
-       int retval;
-
-       /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
-       retval = analogix_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, 12, buf);
-       if (retval)
-               return retval;
-
-       /* Read EDID */
-       for (i = 0; i < 3; i++) {
-               retval = analogix_dp_read_edid(dp);
-               if (!retval)
-                       break;
-       }
-
-       return retval;
-}
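
The deleted code above read raw EDID blocks over I2C (device address 0x50) and validated each 128-byte block by hand; the rule it enforced is simply that a block's byte sum must be zero modulo 256. A standalone sketch of that check:

/* EDID block checksum rule enforced by the deleted code (sketch): */
static int edid_block_sum_ok(const uint8_t block[128])
{
        uint8_t sum = 0;
        int i;

        for (i = 0; i < 128; i++)
                sum += block[i];        /* wraps mod 256 */

        return sum == 0;
}
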
-
 static void
 analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
                                       bool enable)
 {
        u8 data;
 
-       analogix_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
 
        if (enable)
-               analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-                                              DP_LANE_COUNT_ENHANCED_FRAME_EN |
-                                              DPCD_LANE_COUNT_SET(data));
+               drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+                                  DP_LANE_COUNT_ENHANCED_FRAME_EN |
+                                       DPCD_LANE_COUNT_SET(data));
        else
-               analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
-                                              DPCD_LANE_COUNT_SET(data));
+               drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+                                  DPCD_LANE_COUNT_SET(data));
 }
 
 static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
@@ -325,7 +197,7 @@ static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
        u8 data;
        int retval;
 
-       analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
        retval = DPCD_ENHANCED_FRAME_CAP(data);
 
        return retval;
@@ -344,8 +216,8 @@ static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
 {
        analogix_dp_set_training_pattern(dp, DP_NONE);
 
-       analogix_dp_write_byte_to_dpcd(dp, DP_TRAINING_PATTERN_SET,
-                                      DP_TRAINING_PATTERN_DISABLE);
+       drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                          DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -390,8 +262,8 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
        /* Setup RX configuration */
        buf[0] = dp->link_train.link_rate;
        buf[1] = dp->link_train.lane_count;
-       retval = analogix_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, 2, buf);
-       if (retval)
+       retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
+       if (retval < 0)
                return retval;
 
        /* Set TX pre-emphasis to minimum */
@@ -415,20 +287,22 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
        analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
 
        /* Set RX training pattern */
-       retval = analogix_dp_write_byte_to_dpcd(dp,
-                       DP_TRAINING_PATTERN_SET,
-                       DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
-       if (retval)
+       retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                   DP_LINK_SCRAMBLING_DISABLE |
+                                       DP_TRAINING_PATTERN_1);
+       if (retval < 0)
                return retval;
 
        for (lane = 0; lane < lane_count; lane++)
                buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
                            DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
 
-       retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-                                                lane_count, buf);
+       retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf,
+                                  lane_count);
+       if (retval < 0)
+               return retval;
 
-       return retval;
+       return 0;
 }
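
The conversions throughout this file replace the driver's private DPCD accessors with the drm_dp_dpcd_*() helpers, which return the number of bytes transferred on success and a negative errno on failure; that return convention is why every old `if (retval)` test becomes `if (retval < 0)`:

ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
if (ret < 0)            /* < 0 is a real failure; >= 0 is bytes written */
        return ret;
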
 
 static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane)
@@ -580,25 +454,23 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
 
        lane_count = dp->link_train.lane_count;
 
-       retval =  analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_LANE0_1_STATUS, 2, link_status);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+       if (retval < 0)
                return retval;
 
-       retval =  analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+                                 adjust_request, 2);
+       if (retval < 0)
                return retval;
 
        if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
                /* set training pattern 2 for EQ */
                analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
 
-               retval = analogix_dp_write_byte_to_dpcd(dp,
-                               DP_TRAINING_PATTERN_SET,
-                               DP_LINK_SCRAMBLING_DISABLE |
-                               DP_TRAINING_PATTERN_2);
-               if (retval)
+               retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                           DP_LINK_SCRAMBLING_DISABLE |
+                                               DP_TRAINING_PATTERN_2);
+               if (retval < 0)
                        return retval;
 
                dev_info(dp->dev, "Link Training Clock Recovery success\n");
@@ -636,13 +508,12 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
                analogix_dp_set_lane_link_training(dp,
                        dp->link_train.training_lane[lane], lane);
 
-       retval = analogix_dp_write_bytes_to_dpcd(dp,
-                       DP_TRAINING_LANE0_SET, lane_count,
-                       dp->link_train.training_lane);
-       if (retval)
+       retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+                                  dp->link_train.training_lane, lane_count);
+       if (retval < 0)
                return retval;
 
-       return retval;
+       return 0;
 }
 
 static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
@@ -655,9 +526,8 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
 
        lane_count = dp->link_train.lane_count;
 
-       retval = analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_LANE0_1_STATUS, 2, link_status);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+       if (retval < 0)
                return retval;
 
        if (analogix_dp_clock_recovery_ok(link_status, lane_count)) {
@@ -665,14 +535,14 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
                return -EIO;
        }
 
-       retval = analogix_dp_read_bytes_from_dpcd(dp,
-                       DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
-       if (retval)
+       retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+                                 adjust_request, 2);
+       if (retval < 0)
                return retval;
 
-       retval = analogix_dp_read_byte_from_dpcd(dp,
-                       DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
-       if (retval)
+       retval = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED,
+                                  &link_align);
+       if (retval < 0)
                return retval;
 
        analogix_dp_get_adjust_training_lane(dp, adjust_request);
@@ -713,10 +583,12 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
                analogix_dp_set_lane_link_training(dp,
                        dp->link_train.training_lane[lane], lane);
 
-       retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
-                       lane_count, dp->link_train.training_lane);
+       retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+                                  dp->link_train.training_lane, lane_count);
+       if (retval < 0)
+               return retval;
 
-       return retval;
+       return 0;
 }
 
 static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
@@ -730,7 +602,7 @@ static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
         * For DP rev.1.2, Maximum link rate of Main Link lanes
         * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4 Gbps
         */
-       analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data);
        *bandwidth = data;
 }
 
@@ -743,7 +615,7 @@ static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
         * For DP rev.1.1, Maximum number of Main Link lanes
         * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
         */
-       analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+       drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
        *lane_count = DPCD_MAX_LANE_COUNT(data);
 }
 
@@ -912,19 +784,15 @@ static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
        if (enable) {
                analogix_dp_enable_scrambling(dp);
 
-               analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
-                                               &data);
-               analogix_dp_write_byte_to_dpcd(dp,
-                       DP_TRAINING_PATTERN_SET,
-                       (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
+               drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+               drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                  (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
        } else {
                analogix_dp_disable_scrambling(dp);
 
-               analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
-                                               &data);
-               analogix_dp_write_byte_to_dpcd(dp,
-                       DP_TRAINING_PATTERN_SET,
-                       (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
+               drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+               drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+                                  (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
        }
 }
 
@@ -1050,33 +918,37 @@ out:
        return ret;
 }
 
-int analogix_dp_get_modes(struct drm_connector *connector)
+static int analogix_dp_get_modes(struct drm_connector *connector)
 {
        struct analogix_dp_device *dp = to_dp(connector);
-       struct edid *edid = (struct edid *)dp->edid;
+       struct edid *edid;
        int ret, num_modes = 0;
 
-       ret = analogix_dp_prepare_panel(dp, true, false);
-       if (ret) {
-               DRM_ERROR("Failed to prepare panel (%d)\n", ret);
-               return 0;
-       }
+       if (dp->plat_data->panel) {
+               num_modes += drm_panel_get_modes(dp->plat_data->panel);
+       } else {
+               ret = analogix_dp_prepare_panel(dp, true, false);
+               if (ret) {
+                       DRM_ERROR("Failed to prepare panel (%d)\n", ret);
+                       return 0;
+               }
 
-       if (analogix_dp_handle_edid(dp) == 0) {
-               drm_mode_connector_update_edid_property(&dp->connector, edid);
-               num_modes += drm_add_edid_modes(&dp->connector, edid);
-       }
+               edid = drm_get_edid(connector, &dp->aux.ddc);
+               if (edid) {
+                       drm_mode_connector_update_edid_property(&dp->connector,
+                                                               edid);
+                       num_modes += drm_add_edid_modes(&dp->connector, edid);
+                       kfree(edid);
+               }
 
-       if (dp->plat_data->panel)
-               num_modes += drm_panel_get_modes(dp->plat_data->panel);
+               ret = analogix_dp_prepare_panel(dp, false, false);
+               if (ret)
+                       DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+       }
 
        if (dp->plat_data->get_modes)
                num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
 
-       ret = analogix_dp_prepare_panel(dp, false, false);
-       if (ret)
-               DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
-
        return num_modes;
 }
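Moving to struct drm_dp_aux also retires the driver's private I2C-over-AUX EDID path: drm_dp_aux_register() instantiates an i2c_adapter (aux.ddc) on top of the .transfer hook, so the stock drm_get_edid() helper can fetch and parse the EDID, as the hunk above does. A sketch of the resulting pattern, with hypothetical names:

/* Fetch EDID over the AUX channel's I2C adapter and register the modes. */
static int example_get_modes(struct drm_connector *connector,
			     struct drm_dp_aux *aux)
{
	struct edid *edid;
	int num_modes = 0;

	edid = drm_get_edid(connector, &aux->ddc);
	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		num_modes = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return num_modes;
}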
 
@@ -1093,13 +965,16 @@ static const struct drm_connector_helper_funcs analogix_dp_connector_helper_func
        .best_encoder = analogix_dp_best_encoder,
 };
 
-enum drm_connector_status
+static enum drm_connector_status
 analogix_dp_detect(struct drm_connector *connector, bool force)
 {
        struct analogix_dp_device *dp = to_dp(connector);
        enum drm_connector_status status = connector_status_disconnected;
        int ret;
 
+       if (dp->plat_data->panel)
+               return connector_status_connected;
+
        ret = analogix_dp_prepare_panel(dp, true, false);
        if (ret) {
                DRM_ERROR("Failed to prepare panel (%d)\n", ret);
@@ -1395,6 +1270,14 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
        return 0;
 }
 
+static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
+                                      struct drm_dp_aux_msg *msg)
+{
+       struct analogix_dp_device *dp = to_dp(aux);
+
+       return analogix_dp_transfer(dp, msg);
+}
+
 int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
                     struct analogix_dp_plat_data *plat_data)
 {
@@ -1515,6 +1398,14 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
        dp->drm_dev = drm_dev;
        dp->encoder = dp->plat_data->encoder;
 
+       dp->aux.name = "DP-AUX";
+       dp->aux.transfer = analogix_dpaux_transfer;
+       dp->aux.dev = &pdev->dev;
+
+       ret = drm_dp_aux_register(&dp->aux);
+       if (ret)
+               goto err_disable_pm_runtime;
+
        ret = analogix_dp_create_bridge(drm_dev, dp);
        if (ret) {
                DRM_ERROR("failed to create bridge (%d)\n", ret);
index 473b980..5c6a288 100644 (file)
 #define MAX_CR_LOOP 5
 #define MAX_EQ_LOOP 5
 
-/* I2C EDID Chip ID, Slave Address */
-#define I2C_EDID_DEVICE_ADDR                   0x50
-#define I2C_E_EDID_DEVICE_ADDR                 0x30
-
-#define EDID_BLOCK_LENGTH                      0x80
-#define EDID_HEADER_PATTERN                    0x00
-#define EDID_EXTENSION_FLAG                    0x7e
-#define EDID_CHECKSUM                          0x7f
-
 /* DP_MAX_LANE_COUNT */
 #define DPCD_ENHANCED_FRAME_CAP(x)             (((x) >> 7) & 0x1)
 #define DPCD_MAX_LANE_COUNT(x)                 ((x) & 0x1f)
@@ -166,6 +157,7 @@ struct analogix_dp_device {
        struct drm_device       *drm_dev;
        struct drm_connector    connector;
        struct drm_bridge       *bridge;
+       struct drm_dp_aux       aux;
        struct clk              *clock;
        unsigned int            irq;
        void __iomem            *reg_base;
@@ -176,7 +168,6 @@ struct analogix_dp_device {
        int                     dpms_mode;
        int                     hpd_gpio;
        bool                    force_hpd;
-       unsigned char           edid[EDID_BLOCK_LENGTH * 2];
        bool                    psr_support;
 
        struct mutex            panel_lock;
@@ -210,33 +201,6 @@ void analogix_dp_reset_aux(struct analogix_dp_device *dp);
 void analogix_dp_init_aux(struct analogix_dp_device *dp);
 int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp);
 void analogix_dp_enable_sw_function(struct analogix_dp_device *dp);
-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp);
-int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
-                                  unsigned int reg_addr,
-                                  unsigned char data);
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned char *data);
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char data[]);
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
-                                    unsigned int reg_addr,
-                                    unsigned int count,
-                                    unsigned char data[]);
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
-                                 unsigned int device_addr,
-                                 unsigned int reg_addr);
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
-                                  unsigned int device_addr,
-                                  unsigned int reg_addr,
-                                  unsigned int *data);
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
-                                   unsigned int device_addr,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char edid[]);
 void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype);
 void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype);
 void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count);
@@ -285,5 +249,7 @@ void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
 void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
 void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
                              struct edp_vsc_psr *vsc);
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+                            struct drm_dp_aux_msg *msg);
 
 #endif /* _ANALOGIX_DP_CORE_H */
index 52c1b6b..cd37ac0 100644 (file)
@@ -585,330 +585,6 @@ int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
        return retval;
 }
 
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned char *data)
-{
-       u32 reg;
-       int i;
-       int retval;
-
-       for (i = 0; i < 3; i++) {
-               /* Clear AUX CH data buffer */
-               reg = BUF_CLR;
-               writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-               /* Select DPCD device address */
-               reg = AUX_ADDR_7_0(reg_addr);
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-               reg = AUX_ADDR_15_8(reg_addr);
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-               reg = AUX_ADDR_19_16(reg_addr);
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-               /*
-                * Set DisplayPort transaction and read 1 byte
-                * If bit 3 is 1, DisplayPort transaction.
-                * If Bit 3 is 0, I2C transaction.
-                */
-               reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-               /* Start AUX transaction */
-               retval = analogix_dp_start_aux_transaction(dp);
-               if (retval == 0)
-                       break;
-
-               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-       }
-
-       /* Read data buffer */
-       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-       *data = (unsigned char)(reg & 0xff);
-
-       return retval;
-}
-
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char data[])
-{
-       u32 reg;
-       unsigned int start_offset;
-       unsigned int cur_data_count;
-       unsigned int cur_data_idx;
-       int i;
-       int retval = 0;
-
-       /* Clear AUX CH data buffer */
-       reg = BUF_CLR;
-       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-       start_offset = 0;
-       while (start_offset < count) {
-               /* Buffer size of AUX CH is 16 * 4bytes */
-               if ((count - start_offset) > 16)
-                       cur_data_count = 16;
-               else
-                       cur_data_count = count - start_offset;
-
-               for (i = 0; i < 3; i++) {
-                       /* Select DPCD device address */
-                       reg = AUX_ADDR_7_0(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-                       reg = AUX_ADDR_15_8(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-                       reg = AUX_ADDR_19_16(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-                       for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-                            cur_data_idx++) {
-                               reg = data[start_offset + cur_data_idx];
-                               writel(reg, dp->reg_base +
-                                      ANALOGIX_DP_BUF_DATA_0 +
-                                      4 * cur_data_idx);
-                       }
-
-                       /*
-                        * Set DisplayPort transaction and write
-                        * If bit 3 is 1, DisplayPort transaction.
-                        * If Bit 3 is 0, I2C transaction.
-                        */
-                       reg = AUX_LENGTH(cur_data_count) |
-                               AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-                       /* Start AUX transaction */
-                       retval = analogix_dp_start_aux_transaction(dp);
-                       if (retval == 0)
-                               break;
-
-                       dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-                               __func__);
-               }
-
-               start_offset += cur_data_count;
-       }
-
-       return retval;
-}
-
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
-                                    unsigned int reg_addr,
-                                    unsigned int count,
-                                    unsigned char data[])
-{
-       u32 reg;
-       unsigned int start_offset;
-       unsigned int cur_data_count;
-       unsigned int cur_data_idx;
-       int i;
-       int retval = 0;
-
-       /* Clear AUX CH data buffer */
-       reg = BUF_CLR;
-       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-       start_offset = 0;
-       while (start_offset < count) {
-               /* Buffer size of AUX CH is 16 * 4bytes */
-               if ((count - start_offset) > 16)
-                       cur_data_count = 16;
-               else
-                       cur_data_count = count - start_offset;
-
-               /* AUX CH Request Transaction process */
-               for (i = 0; i < 3; i++) {
-                       /* Select DPCD device address */
-                       reg = AUX_ADDR_7_0(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-                       reg = AUX_ADDR_15_8(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-                       reg = AUX_ADDR_19_16(reg_addr + start_offset);
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-                       /*
-                        * Set DisplayPort transaction and read
-                        * If bit 3 is 1, DisplayPort transaction.
-                        * If Bit 3 is 0, I2C transaction.
-                        */
-                       reg = AUX_LENGTH(cur_data_count) |
-                               AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-                       /* Start AUX transaction */
-                       retval = analogix_dp_start_aux_transaction(dp);
-                       if (retval == 0)
-                               break;
-
-                       dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-                               __func__);
-               }
-
-               for (cur_data_idx = 0; cur_data_idx < cur_data_count;
-                   cur_data_idx++) {
-                       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
-                                                + 4 * cur_data_idx);
-                       data[start_offset + cur_data_idx] =
-                               (unsigned char)reg;
-               }
-
-               start_offset += cur_data_count;
-       }
-
-       return retval;
-}
-
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
-                                 unsigned int device_addr,
-                                 unsigned int reg_addr)
-{
-       u32 reg;
-       int retval;
-
-       /* Set EDID device address */
-       reg = device_addr;
-       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
-       writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
-       writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
-       /* Set offset from base address of EDID device */
-       writel(reg_addr, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
-       /*
-        * Set I2C transaction and write address
-        * If bit 3 is 1, DisplayPort transaction.
-        * If Bit 3 is 0, I2C transaction.
-        */
-       reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
-               AUX_TX_COMM_WRITE;
-       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-       /* Start AUX transaction */
-       retval = analogix_dp_start_aux_transaction(dp);
-       if (retval != 0)
-               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-
-       return retval;
-}
-
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
-                                  unsigned int device_addr,
-                                  unsigned int reg_addr,
-                                  unsigned int *data)
-{
-       u32 reg;
-       int i;
-       int retval;
-
-       for (i = 0; i < 3; i++) {
-               /* Clear AUX CH data buffer */
-               reg = BUF_CLR;
-               writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-               /* Select EDID device */
-               retval = analogix_dp_select_i2c_device(dp, device_addr,
-                                                      reg_addr);
-               if (retval != 0)
-                       continue;
-
-               /*
-                * Set I2C transaction and read data
-                * If bit 3 is 1, DisplayPort transaction.
-                * If Bit 3 is 0, I2C transaction.
-                */
-               reg = AUX_TX_COMM_I2C_TRANSACTION |
-                       AUX_TX_COMM_READ;
-               writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
-               /* Start AUX transaction */
-               retval = analogix_dp_start_aux_transaction(dp);
-               if (retval == 0)
-                       break;
-
-               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-       }
-
-       /* Read data */
-       if (retval == 0)
-               *data = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
-       return retval;
-}
-
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
-                                   unsigned int device_addr,
-                                   unsigned int reg_addr,
-                                   unsigned int count,
-                                   unsigned char edid[])
-{
-       u32 reg;
-       unsigned int i, j;
-       unsigned int cur_data_idx;
-       unsigned int defer = 0;
-       int retval = 0;
-
-       for (i = 0; i < count; i += 16) {
-               for (j = 0; j < 3; j++) {
-                       /* Clear AUX CH data buffer */
-                       reg = BUF_CLR;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
-                       /* Set normal AUX CH command */
-                       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
-                       reg &= ~ADDR_ONLY;
-                       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
-
-                       /*
-                        * If Rx sends defer, Tx sends only reads
-                        * request without sending address
-                        */
-                       if (!defer)
-                               retval = analogix_dp_select_i2c_device(dp,
-                                               device_addr, reg_addr + i);
-                       else
-                               defer = 0;
-
-                       if (retval == 0) {
-                               /*
-                                * Set I2C transaction and write data
-                                * If bit 3 is 1, DisplayPort transaction.
-                                * If Bit 3 is 0, I2C transaction.
-                                */
-                               reg = AUX_LENGTH(16) |
-                                       AUX_TX_COMM_I2C_TRANSACTION |
-                                       AUX_TX_COMM_READ;
-                               writel(reg, dp->reg_base +
-                                       ANALOGIX_DP_AUX_CH_CTL_1);
-
-                               /* Start AUX transaction */
-                               retval = analogix_dp_start_aux_transaction(dp);
-                               if (retval == 0)
-                                       break;
-
-                               dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
-                                       __func__);
-                       }
-                       /* Check if Rx sends defer */
-                       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
-                       if (reg == AUX_RX_COMM_AUX_DEFER ||
-                           reg == AUX_RX_COMM_I2C_DEFER) {
-                               dev_err(dp->dev, "Defer: %d\n\n", reg);
-                               defer = 1;
-                       }
-               }
-
-               for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
-                       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
-                                                + 4 * cur_data_idx);
-                       edid[i + cur_data_idx] = (unsigned char)reg;
-               }
-       }
-
-       return retval;
-}
-
 void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
 {
        u32 reg;
@@ -1073,34 +749,22 @@ void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
 
 u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
 }
 
 u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
 {
-       u32 reg;
-
-       reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
-       return reg;
+       return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
 }
 
 void analogix_dp_reset_macro(struct analogix_dp_device *dp)
@@ -1373,3 +1037,130 @@ void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
        val |= IF_EN;
        writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
 }
+
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+                            struct drm_dp_aux_msg *msg)
+{
+       u32 reg;
+       u8 *buffer = msg->buffer;
+       int timeout_loop = 0;
+       unsigned int i;
+       int num_transferred = 0;
+
+       /* Buffer size of AUX CH is 16 bytes */
+       if (WARN_ON(msg->size > 16))
+               return -E2BIG;
+
+       /* Clear AUX CH data buffer */
+       reg = BUF_CLR;
+       writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+       switch (msg->request & ~DP_AUX_I2C_MOT) {
+       case DP_AUX_I2C_WRITE:
+               reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_I2C_TRANSACTION;
+               if (msg->request & DP_AUX_I2C_MOT)
+                       reg |= AUX_TX_COMM_MOT;
+               break;
+
+       case DP_AUX_I2C_READ:
+               reg = AUX_TX_COMM_READ | AUX_TX_COMM_I2C_TRANSACTION;
+               if (msg->request & DP_AUX_I2C_MOT)
+                       reg |= AUX_TX_COMM_MOT;
+               break;
+
+       case DP_AUX_NATIVE_WRITE:
+               reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_DP_TRANSACTION;
+               break;
+
+       case DP_AUX_NATIVE_READ:
+               reg = AUX_TX_COMM_READ | AUX_TX_COMM_DP_TRANSACTION;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       reg |= AUX_LENGTH(msg->size);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+       /* Select DPCD device address */
+       reg = AUX_ADDR_7_0(msg->address);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+       reg = AUX_ADDR_15_8(msg->address);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+       reg = AUX_ADDR_19_16(msg->address);
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+       if (!(msg->request & DP_AUX_I2C_READ)) {
+               for (i = 0; i < msg->size; i++) {
+                       reg = buffer[i];
+                       writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+                              4 * i);
+                       num_transferred++;
+               }
+       }
+
+       /* Enable AUX CH operation */
+       reg = AUX_EN;
+
+       /* Zero-sized messages specify address-only transactions. */
+       if (msg->size < 1)
+               reg |= ADDR_ONLY;
+
+       writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+       /* Is AUX CH command reply received? */
+       /* TODO: Wait for an interrupt instead of looping? */
+       reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+       while (!(reg & RPLY_RECEIV)) {
+               timeout_loop++;
+               if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+                       dev_err(dp->dev, "AUX CH command reply failed!\n");
+                       return -ETIMEDOUT;
+               }
+               reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+               usleep_range(10, 11);
+       }
+
+       /* Clear interrupt source for AUX CH command reply */
+       writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+       /* Clear interrupt source for AUX CH access error */
+       reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+       if (reg & AUX_ERR) {
+               writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
+               return -EREMOTEIO;
+       }
+
+       /* Check AUX CH error access status */
+       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+       if (reg & AUX_STATUS_MASK) {
+               dev_err(dp->dev, "AUX CH error happened: %d\n",
+                       reg & AUX_STATUS_MASK);
+               return -EREMOTEIO;
+       }
+
+       if (msg->request & DP_AUX_I2C_READ) {
+               for (i = 0; i < msg->size; i++) {
+                       reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+                                   4 * i);
+                       buffer[i] = (unsigned char)reg;
+                       num_transferred++;
+               }
+       }
+
+       /* Check if Rx sends defer */
+       reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
+       if (reg == AUX_RX_COMM_AUX_DEFER)
+               msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+       else if (reg == AUX_RX_COMM_I2C_DEFER)
+               msg->reply = DP_AUX_I2C_REPLY_DEFER;
+       else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE ||
+                (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_READ)
+               msg->reply = DP_AUX_I2C_REPLY_ACK;
+       else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE ||
+                (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
+               msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+
+       return num_transferred;
+}
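analogix_dp_transfer() implements the struct drm_dp_aux .transfer contract: return the number of bytes actually moved (zero is legal for address-only transactions), or a negative errno when the transaction could not be carried out, and record the sink's reply in msg->reply so the core helpers can retry DEFERred transactions. The bare shape of such a hook, as a non-functional sketch:

static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	/* 1. program the hardware from msg->request, msg->address and,
	 *    for writes, msg->buffer / msg->size
	 * 2. kick the transaction and wait for a reply
	 * 3. for reads, copy the payload back into msg->buffer
	 */
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;	/* or *_REPLY_DEFER, etc. */
	return msg->size;			/* bytes transferred */
}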
index cdf39aa..66ad8e6 100644 (file)
@@ -1813,9 +1813,6 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
        /* Disable all interrupts */
        hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
 
-       hdmi->connector.funcs->destroy(&hdmi->connector);
-       hdmi->encoder->funcs->destroy(hdmi->encoder);
-
        clk_disable_unprepare(hdmi->iahb_clk);
        clk_disable_unprepare(hdmi->isfr_clk);
        i2c_put_adapter(hdmi->ddc);
index 1cc9ee6..bb2438d 100644 (file)
@@ -150,7 +150,8 @@ static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *fi
 {
        struct cirrus_bo *cirrusbo = cirrus_bo(bo);
 
-       return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&cirrusbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
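drm_vma_node_verify_access() now takes the struct drm_file that was granted access instead of the struct file; the DRM core stores the drm_file in filp->private_data at open time, hence the new argument. The same one-line change applies to every TTM verify_access hook. A sketch with a hypothetical driver object:

struct example_bo {
	struct ttm_buffer_object bo;
	struct drm_gem_object gem;
};

static int example_bo_verify_access(struct ttm_buffer_object *bo,
				    struct file *filp)
{
	struct example_bo *ebo = container_of(bo, struct example_bo, bo);

	/* filp->private_data holds the struct drm_file for this client */
	return drm_vma_node_verify_access(&ebo->gem.vma_node,
					  filp->private_data);
}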
index a5126e5..904d29c 100644 (file)
@@ -1609,7 +1609,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        struct drm_crtc_state *crtc_state;
        unsigned plane_mask;
        int ret = 0;
-       unsigned int i, j, k;
+       unsigned int i, j;
 
        /* disallow for drivers not supporting atomic: */
        if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1691,16 +1691,7 @@ retry:
                                goto out;
                        }
 
-                       for (k = 0; k < obj->properties->count; k++)
-                               if (obj->properties->properties[k]->base.id == prop_id)
-                                       break;
-
-                       if (k == obj->properties->count) {
-                               ret = -EINVAL;
-                               goto out;
-                       }
-
-                       prop = drm_property_find(dev, prop_id);
+                       prop = drm_mode_obj_find_prop_id(obj, prop_id);
                        if (!prop) {
                                drm_mode_object_unreference(obj);
                                ret = -ENOENT;
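drm_mode_obj_find_prop_id() centralizes the lookup that was open-coded above: walk the object's attached-property array and return the matching struct drm_property, or NULL if the object does not carry that property. A sketch of the equivalent logic, reconstructed from the removed lines:

/* Equivalent of the removed open-coded scan, returning the property
 * itself instead of an index. */
static struct drm_property *
example_obj_find_prop_id(struct drm_mode_object *obj, uint32_t prop_id)
{
	int i;

	for (i = 0; i < obj->properties->count; i++)
		if (obj->properties->properties[i]->base.id == prop_id)
			return obj->properties->properties[i];

	return NULL;
}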
index 6fdd7ba..c3f8347 100644 (file)
@@ -1009,29 +1009,46 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
  * @dev: DRM device
  * @state: atomic state object with old state structures
+ * @pre_swap: if true, do an interruptible wait
  *
  * For implicit sync, drivers should fish the exclusive fence out of the
  * incoming fbs and stash it in the drm_plane_state. This is called after
  * drm_atomic_helper_swap_state(), so it uses the current plane state (and
  * just uses the atomic state to find the changed planes).
+ *
+ * Returns zero on success or a negative errno if fence_wait() fails.
  */
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-                           struct drm_atomic_state *state)
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+                                     struct drm_atomic_state *state,
+                                     bool pre_swap)
 {
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
-       int i;
+       int i, ret;
 
        for_each_plane_in_state(state, plane, plane_state, i) {
-               if (!plane->state->fence)
+               if (!pre_swap)
+                       plane_state = plane->state;
+
+               if (!plane_state->fence)
                        continue;
 
-               WARN_ON(!plane->state->fb);
+               WARN_ON(!plane_state->fb);
+
+               /*
+                * If waiting for fences pre-swap (i.e. for a nonblocking
+                * commit), userspace can still interrupt the operation, so
+                * make the wait interruptible instead of blocking until the
+                * fence signals.
+                */
+               ret = fence_wait(plane_state->fence, pre_swap);
+               if (ret)
+                       return ret;
 
-               fence_wait(plane->state->fence, false);
-               fence_put(plane->state->fence);
-               plane->state->fence = NULL;
+               fence_put(plane_state->fence);
+               plane_state->fence = NULL;
        }
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
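fence_wait(fence, intr) blocks until the fence signals and returns zero, or a negative error; with intr == true the sleep is interruptible and a pending signal aborts the wait with -ERESTARTSYS. That is why the pre-swap wait of a nonblocking commit can fail and must be propagated, while the post-swap wait passes false. The per-plane wait, as a sketch with a hypothetical helper name:

#include <linux/fence.h>

/* Wait on a plane's stashed exclusive fence; with intr == true a
 * pending signal aborts the wait with -ERESTARTSYS. */
static int example_wait_plane_fence(struct drm_plane_state *plane_state,
				    bool intr)
{
	long ret;

	if (!plane_state->fence)
		return 0;

	ret = fence_wait(plane_state->fence, intr);
	if (ret)
		return ret;

	fence_put(plane_state->fence);
	plane_state->fence = NULL;
	return 0;
}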
 
@@ -1179,7 +1196,7 @@ static void commit_tail(struct drm_atomic_state *state)
 
        funcs = dev->mode_config.helper_private;
 
-       drm_atomic_helper_wait_for_fences(dev, state);
+       drm_atomic_helper_wait_for_fences(dev, state, false);
 
        drm_atomic_helper_wait_for_dependencies(state);
 
@@ -1238,6 +1255,12 @@ int drm_atomic_helper_commit(struct drm_device *dev,
        if (ret)
                return ret;
 
+       if (!nonblock) {
+               ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
@@ -1670,7 +1693,7 @@ fail:
 }
 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 
-bool plane_crtc_active(struct drm_plane_state *state)
+static bool plane_crtc_active(const struct drm_plane_state *state)
 {
        return state->crtc && state->crtc->state->active;
 }
index 0813b7e..85172a9 100644 (file)
  */
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
+#include <drm/drm_blend.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
 
-#include "drm_internal.h"
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The basic plane composition model supported by standard plane properties only
+ * has a source rectangle (in logical pixels within the &drm_framebuffer), with
+ * sub-pixel accuracy, which is scaled up to a pixel-aligned destination
+ * rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is
+ * defined by the horizontal and vertical visible pixels (stored in @hdisplay
+ * and @vdisplay) of the requested mode (stored in @mode in the
+ * &drm_crtc_state). These two rectangles are both stored in the
+ * &drm_plane_state.
+ *
+ * For the atomic ioctl the following standard (atomic) properties on the plane object
+ * encode the basic plane composition model:
+ *
+ * SRC_X:
+ *     X coordinate offset for the source rectangle within the
+ *     &drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_Y:
+ *     Y coordinate offset for the source rectangle within the
+ *     &drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_W:
+ *     Width for the source rectangle within the &drm_framebuffer, in 16.16
+ *     fixed point. SRC_X plus SRC_W must be within the width of the source
+ *     framebuffer. Must be positive.
+ * SRC_H:
+ *     Height for the source rectangle within the &drm_framebuffer, in 16.16
+ *     fixed point. SRC_Y plus SRC_H must be within the height of the source
+ *     framebuffer. Must be positive.
+ * CRTC_X:
+ *     X coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_Y:
+ *     Y coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_W:
+ *     Width for the destination rectangle. CRTC_X plus CRTC_W can extend past
+ *     the currently visible horizontal area of the &drm_crtc.
+ * CRTC_H:
+ *     Height for the destination rectangle. CRTC_Y plus CRTC_H can extend past
+ *     the currently visible vertical area of the &drm_crtc.
+ * FB_ID:
+ *     Mode object ID of the &drm_framebuffer this plane should scan out.
+ * CRTC_ID:
+ *     Mode object ID of the &drm_crtc this plane should be connected to.
+ *
+ * Note that the source rectangle must fully lie within the bounds of the
+ * &drm_framebuffer. The destination rectangle can lie outside of the visible
+ * area of the current mode of the CRTC. It must be appropriately clipped by the
+ * driver, which can be done by calling drm_plane_helper_check_update(). Drivers
+ * are also allowed to round the subpixel sampling positions appropriately, but
+ * only to the next full pixel. No pixel outside of the source rectangle may
+ * ever be sampled, which is important when applying more sophisticated
+ * filtering than just a bilinear one when scaling. The filtering mode when
+ * scaling is unspecified.
+ *
+ * On top of this basic transformation additional properties can be exposed by
+ * the driver:
+ *
+ * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ *   rotation and reflection step between the source and destination rectangles.
+ *   Without this property the rectangle is only scaled, but not rotated or
+ *   reflected.
+ *
+ * - Z position is set up with drm_plane_create_zpos_immutable_property() and
+ *   drm_plane_create_zpos_property(). It controls the visibility of overlapping
+ *   planes. Without this property the primary plane is always below the cursor
+ *   plane, and ordering between all other planes is undefined.
+ *
+ * Note that all the property extensions described here apply either to the
+ * plane or the CRTC (e.g. for the background color, which currently is not
+ * exposed and assumed to be black).
+ */
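The SRC_* coordinates are 16.16 fixed point, i.e. pixel values scaled by 65536, while the CRTC_* destination values are whole pixels. For instance, scanning out a 1920x1080 framebuffer unscaled means SRC_W = 1920 << 16 and CRTC_W = 1920. A sketch filling a plane state for a 1:1 scanout, with a hypothetical helper name:

/* Show the whole framebuffer, unscaled, at the CRTC origin.
 * Source is 16.16 fixed point, destination is integer pixels. */
static void example_fill_plane_state(struct drm_plane_state *state,
				     struct drm_framebuffer *fb)
{
	state->src_x = 0;
	state->src_y = 0;
	state->src_w = fb->width << 16;
	state->src_h = fb->height << 16;

	state->crtc_x = 0;
	state->crtc_y = 0;
	state->crtc_w = fb->width;
	state->crtc_h = fb->height;
}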
+
+/**
+ * drm_mode_create_rotation_property - create a new rotation property
+ * @dev: DRM device
+ * @supported_rotations: bitmask of supported rotations and reflections
+ *
+ * This creates a new property with the selected support for transformations.
+ * The resulting property should be stored in @rotation_property in
+ * &drm_mode_config. It then must be attached to each plane which supports
+ * rotations using drm_object_attach_property().
+ *
+ * FIXME: Probably better if the rotation property is created on each plane,
+ * like the zpos property. Otherwise it's not possible to allow different
+ * rotation modes on different planes.
+ *
+ * Since a rotation by 180° is the same as reflecting both along the x
+ * and the y axis the rotation property is somewhat redundant. Drivers can use
+ * drm_rotation_simplify() to normalize values of this property.
+ *
+ * The property exposed to userspace is a bitmask property (see
+ * drm_property_create_bitmask()) called "rotation" and has the following
+ * bitmask enumeration values:
+ *
+ * DRM_ROTATE_0:
+ *     "rotate-0"
+ * DRM_ROTATE_90:
+ *     "rotate-90"
+ * DRM_ROTATE_180:
+ *     "rotate-180"
+ * DRM_ROTATE_270:
+ *     "rotate-270"
+ * DRM_REFLECT_X:
+ *     "reflect-x"
+ * DRM_REFLECT_Y:
+ *     "reflect-y"
+ *
+ * Rotation is the specified amount in degrees in the counter-clockwise
+ * direction; the X and Y axes are within the source rectangle, i.e. the X/Y
+ * axes before rotation. After reflection, the rotation is applied to the image
+ * sampled from the source rectangle, before scaling it to fit the destination
+ * rectangle.
+ */
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+                                                      unsigned int supported_rotations)
+{
+       static const struct drm_prop_enum_list props[] = {
+               { __builtin_ffs(DRM_ROTATE_0) - 1,   "rotate-0" },
+               { __builtin_ffs(DRM_ROTATE_90) - 1,  "rotate-90" },
+               { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
+               { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
+               { __builtin_ffs(DRM_REFLECT_X) - 1,  "reflect-x" },
+               { __builtin_ffs(DRM_REFLECT_Y) - 1,  "reflect-y" },
+       };
+
+       return drm_property_create_bitmask(dev, 0, "rotation",
+                                          props, ARRAY_SIZE(props),
+                                          supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
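Until the FIXME above is resolved the property lives in &drm_mode_config and is shared by all planes, so a driver typically creates it once and attaches it per plane. A sketch, assuming hardware that handles 0/180 degree rotation plus both reflections:

/* Create the shared rotation property once and attach it to a plane,
 * with rotate-0 as the initial value. */
static int example_init_rotation(struct drm_device *dev,
				 struct drm_plane *plane)
{
	if (!dev->mode_config.rotation_property)
		dev->mode_config.rotation_property =
			drm_mode_create_rotation_property(dev,
					DRM_ROTATE_0 | DRM_ROTATE_180 |
					DRM_REFLECT_X | DRM_REFLECT_Y);
	if (!dev->mode_config.rotation_property)
		return -ENOMEM;

	drm_object_attach_property(&plane->base,
				   dev->mode_config.rotation_property,
				   DRM_ROTATE_0);
	return 0;
}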
+
+/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * Eg. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, DRM_ROTATE_0 |
+ *                       DRM_ROTATE_90 | DRM_ROTATE_180 |
+ *                       DRM_ROTATE_270 | DRM_REFLECT_Y);
+ *
+ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+                                  unsigned int supported_rotations)
+{
+       if (rotation & ~supported_rotations) {
+               rotation ^= DRM_REFLECT_X | DRM_REFLECT_Y;
+               rotation = (rotation & DRM_REFLECT_MASK) |
+                          BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
+       }
+
+       return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
 
 /**
  * drm_plane_create_zpos_property - create mutable zpos property
  * If zpos of some planes cannot be changed (like fixed background or
  * cursor/topmost planes), driver should adjust min/max values and assign those
  * planes immutable zpos property with lower or higher values (for more
- * information, see drm_mode_create_zpos_immutable_property() function). In such
+ * information, see drm_plane_create_zpos_immutable_property()). In such a
  * case the driver should also assign proper initial zpos values for all planes
  * in its plane_reset() callback, so the planes will always be sorted properly.
  *
+ * See also drm_atomic_normalize_zpos().
+ *
+ * The property exposed to userspace is called "zpos".
+ *
  * Returns:
  * Zero on success, negative errno on failure.
  */
@@ -88,7 +253,9 @@ EXPORT_SYMBOL(drm_plane_create_zpos_property);
  * support for it in the drm core. With this property the driver lets userspace
  * query the arrangement of the planes for the blending operation and signals
  * that the hardware (or driver) doesn't support changing the planes'
- * order.
+ * order. For mutable zpos see drm_plane_create_zpos_property().
+ *
+ * The property exposed to userspace is called "zpos".
  *
  * Returns:
  * Zero on success, negative errno on failure.
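Both zpos flavours are attached from the driver's plane init code. A sketch, assuming a stack of num_planes overlay planes with the cursor pinned on top:

/* Mutable zpos for overlays in [0, num_planes - 1], an immutable
 * top-most zpos for the cursor plane. */
static int example_init_zpos(struct drm_plane *overlay, unsigned int idx,
			     struct drm_plane *cursor,
			     unsigned int num_planes)
{
	int ret;

	ret = drm_plane_create_zpos_property(overlay, idx, 0,
					     num_planes - 1);
	if (ret)
		return ret;

	return drm_plane_create_zpos_immutable_property(cursor, num_planes);
}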
@@ -127,20 +294,6 @@ static int drm_atomic_state_zpos_cmp(const void *a, const void *b)
                return sa->plane->base.id - sb->plane->base.id;
 }
 
-/**
- * drm_atomic_helper_crtc_normalize_zpos - calculate normalized zpos values
- * @crtc: crtc with planes, which have to be considered for normalization
- * @crtc_state: new atomic state to apply
- *
- * This function checks new states of all planes assigned to given crtc and
- * calculates normalized zpos value for them. Planes are compared first by their
- * zpos values, then by plane id (if zpos equals). Plane with lowest zpos value
- * is at the bottom. The plane_state->normalized_zpos is then filled with unique
- * values from 0 to number of active planes in crtc minus one.
- *
- * RETURNS
- * Zero for success or -errno
- */
 static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
                                          struct drm_crtc_state *crtc_state)
 {
@@ -198,8 +351,14 @@ done:
  * @state: atomic state of DRM device
  *
  * This function calculates normalized zpos value for all modified planes in
- * the provided atomic state of DRM device. For more information, see
- * drm_atomic_helper_crtc_normalize_zpos() function.
+ * the provided atomic state of DRM device.
+ *
+ * For every CRTC this function checks new states of all planes assigned to
+ * it and calculates normalized zpos value for these planes. Planes are compared
+ * first by their zpos values, then by plane id (if zpos is equal). The plane
+ * with lowest zpos value is at the bottom. The plane_state->normalized_zpos is
+ * then filled with unique values from 0 to number of active planes in crtc
+ * minus one.
  *
  * RETURNS
  * Zero for success or -errno
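A driver that exposes zpos recomputes the normalized values in its atomic check path before validating the configuration. A sketch, assuming the stock helpers are otherwise used:

/* Recompute normalized_zpos for all planes touched by the commit,
 * then run the standard atomic checks. */
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret)
		return ret;

	return drm_atomic_helper_check(dev, state);
}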
index 4840466..0ee052b 100644 (file)
 
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 
-#include <drm/drm_crtc.h>
-
-#include "drm/drmP.h"
+#include <drm/drm_bridge.h>
 
 /**
  * DOC: overview
index 3219151..adb1dd7 100644 (file)
@@ -755,7 +755,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev,
                return -EINVAL;
        }
 
-       entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
@@ -905,14 +905,14 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
                return -EINVAL;
        }
 
-       entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+       entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
 
-       entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+       entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
        if (!entry->seglist) {
                kfree(entry->buflist);
                mutex_unlock(&dev->struct_mutex);
@@ -923,8 +923,9 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
-       temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
-                              sizeof(*dma->pagelist), GFP_KERNEL);
+       temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
+                                     sizeof(*dma->pagelist),
+                                     GFP_KERNEL);
        if (!temp_pagelist) {
                kfree(entry->buflist);
                kfree(entry->seglist);
@@ -1116,8 +1117,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
                return -EINVAL;
        }
 
-       entry->buflist = kzalloc(count * sizeof(*entry->buflist),
-                               GFP_KERNEL);
+       entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
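These conversions matter because kcalloc() and kmalloc_array() differ from the open-coded kzalloc(count * size, ...) calls they replace in one important way: they check the multiplication for overflow and return NULL instead of silently allocating a short buffer. Roughly, as a sketch:

#include <linux/slab.h>

/* What kcalloc(count, size, gfp) adds over kzalloc(count * size, gfp):
 * refuse the allocation when count * size would overflow. */
static void *example_checked_zalloc(size_t count, size_t size, gfp_t gfp)
{
	if (size != 0 && count > SIZE_MAX / size)
		return NULL;

	return kzalloc(count * size, gfp);
}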
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
new file mode 100644 (file)
index 0000000..d28ffdd
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_color_mgmt.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Color management or color space adjustment is supported through a set of 5
+ * properties on the &drm_crtc object. They are set up by calling
+ * drm_crtc_enable_color_mgmt().
+ *
+ * "DEGAMMA_LUT”:
+ *     Blob property to set the degamma lookup table (LUT) mapping pixel data
+ *     from the framebuffer before it is given to the transformation matrix.
+ *     The data is interpreted as an array of struct &drm_color_lut elements.
+ *     Hardware might choose not to use the full precision of the LUT elements
+ *     nor use all the elements of the LUT (for example the hardware might
+ *     choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “DEGAMMA_LUT_SIZE”:
+ *     Unsigned range property to give the size of the lookup table to be set
+ *     on the DEGAMMA_LUT property (the size depends on the underlying
+ *     hardware). If drivers support multiple LUT sizes then they should
+ *     publish the largest size, and sub-sample smaller sized LUTs (e.g. for
+ *     split-gamma modes) appropriately.
+ *
+ * “CTM”:
+ *     Blob property to set the current transformation matrix (CTM) applied to
+ *     pixel data after the lookup through the degamma LUT and before the
+ *     lookup through the gamma LUT. The data is interpreted as a struct
+ *     &drm_color_ctm.
+ *
+ * “GAMMA_LUT”:
+ *     Blob property to set the gamma lookup table (LUT) mapping pixel data
+ *     after the transformation matrix to data sent to the connector. The
+ *     data is interpreted as an array of struct &drm_color_lut elements.
+ *     Hardware might choose not to use the full precision of the LUT elements
+ *     nor use all the elements of the LUT (for example the hardware might
+ *     choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “GAMMA_LUT_SIZE”:
+ *     Unsigned range property to give the size of the lookup table to be set
+ *     on the GAMMA_LUT property (the size depends on the underlying hardware).
+ *     If drivers support multiple LUT sizes then they should publish the
+ *     largest size, and sub-sample smaller sized LUTs (e.g. for split-gamma
+ *     modes) appropriately.
+ *
+ * There is also support for a legacy gamma table, which is set up by calling
+ * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
+ * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
+ * "GAMMA_LUT" property above.
+ */
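Each LUT entry is a struct drm_color_lut with 16-bit red/green/blue channels, so the full range of a channel is 0..0xffff regardless of the hardware's internal precision. A sketch that fills a linear (identity) ramp, e.g. as the payload for a GAMMA_LUT blob:

/* Fill a linear LUT: entry i maps to i / (lut_size - 1) of full range. */
static void example_fill_linear_lut(struct drm_color_lut *lut,
				    unsigned int lut_size)
{
	unsigned int i;

	for (i = 0; i < lut_size; i++) {
		u16 val = (i * 0xffffU) / (lut_size - 1);

		lut[i].red = val;
		lut[i].green = val;
		lut[i].blue = val;
	}
}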
+
+/**
+ * drm_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @has_ctm: whether to attach ctm_property for CSC matrix
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction
+ * properties on a CRTC. This includes 3 properties (degamma, CSC and
+ * gamma) that userspace can set and 2 size properties to inform
+ * userspace of the LUT sizes. Each of the properties is
+ * optional: the gamma and degamma properties are only attached if
+ * their size is not 0, and ctm_property is only attached if has_ctm is
+ * true.
+ */
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+                               uint degamma_lut_size,
+                               bool has_ctm,
+                               uint gamma_lut_size)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+
+       if (degamma_lut_size) {
+               drm_object_attach_property(&crtc->base,
+                                          config->degamma_lut_property, 0);
+               drm_object_attach_property(&crtc->base,
+                                          config->degamma_lut_size_property,
+                                          degamma_lut_size);
+       }
+
+       if (has_ctm)
+               drm_object_attach_property(&crtc->base,
+                                          config->ctm_property, 0);
+
+       if (gamma_lut_size) {
+               drm_object_attach_property(&crtc->base,
+                                          config->gamma_lut_property, 0);
+               drm_object_attach_property(&crtc->base,
+                                          config->gamma_lut_size_property,
+                                          gamma_lut_size);
+       }
+}
+EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
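A sketch of wiring this up at CRTC init time, assuming (hypothetically) hardware with a 33-entry degamma LUT, a CSC matrix and a 256-entry gamma LUT, and keeping the legacy gamma ioctl working against the same table:

static int example_crtc_color_init(struct drm_crtc *crtc)
{
	/* attach DEGAMMA_LUT(_SIZE), CTM and GAMMA_LUT(_SIZE) */
	drm_crtc_enable_color_mgmt(crtc, 33, true, 256);

	/* legacy gamma table, aliased onto GAMMA_LUT by the helpers */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}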
+
+/**
+ * drm_mode_crtc_set_gamma_size - set the gamma table size
+ * @crtc: CRTC to set the gamma table size for
+ * @gamma_size: size of the gamma table
+ *
+ * Drivers which support gamma tables should set this to the supported gamma
+ * table size when initializing the CRTC. Currently the drm core only supports a
+ * fixed gamma table size.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                int gamma_size)
+{
+       uint16_t *r_base, *g_base, *b_base;
+       int i;
+
+       crtc->gamma_size = gamma_size;
+
+       crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
+                                   GFP_KERNEL);
+       if (!crtc->gamma_store) {
+               crtc->gamma_size = 0;
+               return -ENOMEM;
+       }
+
+       r_base = crtc->gamma_store;
+       g_base = r_base + gamma_size;
+       b_base = g_base + gamma_size;
+       for (i = 0; i < gamma_size; i++) {
+               r_base[i] = i << 8;
+               g_base[i] = i << 8;
+               b_base[i] = i << 8;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
+/**
+ * drm_mode_gamma_set_ioctl - set the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
+ * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+       if (!crtc) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       if (crtc->funcs->gamma_set == NULL) {
+               ret = -ENOSYS;
+               goto out;
+       }
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+
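From userspace this ioctl is normally reached through libdrm's
drmModeCrtcSetGamma(). A sketch that programs a linear ramp, assuming the CRTC
reports a 256-entry table (fd and crtc_id assumed valid, error handling
elided):

    uint16_t r[256], g[256], b[256];
    int i;

    for (i = 0; i < 256; i++)
            r[i] = g[i] = b[i] = i << 8;
    /* size must equal crtc->gamma_size or the ioctl returns -EINVAL */
    drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b);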
+/**
+ * drm_mode_gamma_get_ioctl - get the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Copy the current gamma table into the storage provided. This also provides
+ * the gamma table size the driver expects, which can be used to size the
+ * allocated storage.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       drm_modeset_lock_all(dev);
+       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+       if (!crtc) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
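The matching read-out path can size its buffers from drmModeGetCrtc(), which
reports gamma_size (userspace sketch; freeing and error checks elided):

    drmModeCrtc *c = drmModeGetCrtc(fd, crtc_id);
    uint16_t *lut = calloc(3 * c->gamma_size, sizeof(uint16_t));

    drmModeCrtcGetGamma(fd, crtc_id, c->gamma_size, lut,
                        lut + c->gamma_size, lut + 2 * c->gamma_size);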
index 631691b..2d7bedf 100644 (file)
@@ -151,7 +151,11 @@ static void drm_crtc_unregister_all(struct drm_device *dev)
  * @funcs: callbacks for the new CRTC
  * @name: printf style format string for the CRTC name, or NULL for default name
  *
- * Inits a new object created as base part of a driver crtc object.
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes. For really simple hardware which has only one plane, look at
+ * drm_simple_display_pipe_init() instead.
  *
  * Returns:
  * Zero on success, error code on failure.
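A sketch of the preferred path through drm_crtc_init_with_planes() (the foo_*
names are hypothetical; the primary and cursor planes are assumed to have been
initialised already):

    ret = drm_crtc_init_with_planes(dev, &foo->crtc, foo->primary,
                                    foo->cursor, &foo_crtc_funcs,
                                    "foo-crtc-%d", foo->id);
    if (ret)
            return ret;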
@@ -251,255 +255,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
-static unsigned int drm_num_planes(struct drm_device *dev)
-{
-       unsigned int num = 0;
-       struct drm_plane *tmp;
-
-       drm_for_each_plane(tmp, dev) {
-               num++;
-       }
-
-       return num;
-}
-
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
-                            unsigned long possible_crtcs,
-                            const struct drm_plane_funcs *funcs,
-                            const uint32_t *formats, unsigned int format_count,
-                            enum drm_plane_type type,
-                            const char *name, ...)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       int ret;
-
-       ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
-       if (ret)
-               return ret;
-
-       drm_modeset_lock_init(&plane->mutex);
-
-       plane->base.properties = &plane->properties;
-       plane->dev = dev;
-       plane->funcs = funcs;
-       plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
-                                           GFP_KERNEL);
-       if (!plane->format_types) {
-               DRM_DEBUG_KMS("out of memory when allocating plane\n");
-               drm_mode_object_unregister(dev, &plane->base);
-               return -ENOMEM;
-       }
-
-       if (name) {
-               va_list ap;
-
-               va_start(ap, name);
-               plane->name = kvasprintf(GFP_KERNEL, name, ap);
-               va_end(ap);
-       } else {
-               plane->name = kasprintf(GFP_KERNEL, "plane-%d",
-                                       drm_num_planes(dev));
-       }
-       if (!plane->name) {
-               kfree(plane->format_types);
-               drm_mode_object_unregister(dev, &plane->base);
-               return -ENOMEM;
-       }
-
-       memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
-       plane->format_count = format_count;
-       plane->possible_crtcs = possible_crtcs;
-       plane->type = type;
-
-       list_add_tail(&plane->head, &config->plane_list);
-       plane->index = config->num_total_plane++;
-       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-               config->num_overlay_plane++;
-
-       drm_object_attach_property(&plane->base,
-                                  config->plane_type_property,
-                                  plane->type);
-
-       if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
-               drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
-               drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_x, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_y, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_w, 0);
-               drm_object_attach_property(&plane->base, config->prop_src_h, 0);
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_universal_plane_init);
-
-static int drm_plane_register_all(struct drm_device *dev)
-{
-       struct drm_plane *plane;
-       int ret = 0;
-
-       drm_for_each_plane(plane, dev) {
-               if (plane->funcs->late_register)
-                       ret = plane->funcs->late_register(plane);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static void drm_plane_unregister_all(struct drm_device *dev)
-{
-       struct drm_plane *plane;
-
-       drm_for_each_plane(plane, dev) {
-               if (plane->funcs->early_unregister)
-                       plane->funcs->early_unregister(plane);
-       }
-}
-
-/**
- * drm_plane_init - Initialize a legacy plane
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @is_primary: plane type (primary vs overlay)
- *
- * Legacy API to initialize a DRM plane.
- *
- * New drivers should call drm_universal_plane_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
-                  unsigned long possible_crtcs,
-                  const struct drm_plane_funcs *funcs,
-                  const uint32_t *formats, unsigned int format_count,
-                  bool is_primary)
-{
-       enum drm_plane_type type;
-
-       type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-       return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
-                                       formats, format_count, type, NULL);
-}
-EXPORT_SYMBOL(drm_plane_init);
-
-/**
- * drm_plane_cleanup - Clean up the core plane usage
- * @plane: plane to cleanup
- *
- * This function cleans up @plane and removes it from the DRM mode setting
- * core. Note that the function does *not* free the plane structure itself,
- * this is the responsibility of the caller.
- */
-void drm_plane_cleanup(struct drm_plane *plane)
-{
-       struct drm_device *dev = plane->dev;
-
-       drm_modeset_lock_all(dev);
-       kfree(plane->format_types);
-       drm_mode_object_unregister(dev, &plane->base);
-
-       BUG_ON(list_empty(&plane->head));
-
-       /* Note that the plane_list is considered to be static; should we
-        * remove the drm_plane at runtime we would have to decrement all
-        * the indices on the drm_plane after us in the plane_list.
-        */
-
-       list_del(&plane->head);
-       dev->mode_config.num_total_plane--;
-       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-               dev->mode_config.num_overlay_plane--;
-       drm_modeset_unlock_all(dev);
-
-       WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
-       if (plane->state && plane->funcs->atomic_destroy_state)
-               plane->funcs->atomic_destroy_state(plane, plane->state);
-
-       kfree(plane->name);
-
-       memset(plane, 0, sizeof(*plane));
-}
-EXPORT_SYMBOL(drm_plane_cleanup);
-
-/**
- * drm_plane_from_index - find the registered plane at an index
- * @dev: DRM device
- * @idx: index of registered plane to find for
- *
- * Given a plane index, return the registered plane from DRM device's
- * list of planes with matching index.
- */
-struct drm_plane *
-drm_plane_from_index(struct drm_device *dev, int idx)
-{
-       struct drm_plane *plane;
-
-       drm_for_each_plane(plane, dev)
-               if (idx == plane->index)
-                       return plane;
-
-       return NULL;
-}
-EXPORT_SYMBOL(drm_plane_from_index);
-
-/**
- * drm_plane_force_disable - Forcibly disable a plane
- * @plane: plane to disable
- *
- * Forces the plane to be disabled.
- *
- * Used when the plane's current framebuffer is destroyed,
- * and when restoring fbdev mode.
- */
-void drm_plane_force_disable(struct drm_plane *plane)
-{
-       int ret;
-
-       if (!plane->fb)
-               return;
-
-       plane->old_fb = plane->fb;
-       ret = plane->funcs->disable_plane(plane);
-       if (ret) {
-               DRM_ERROR("failed to disable plane with busy fb\n");
-               plane->old_fb = NULL;
-               return;
-       }
-       /* disconnect the plane from the fb and crtc: */
-       drm_framebuffer_unreference(plane->old_fb);
-       plane->old_fb = NULL;
-       plane->fb = NULL;
-       plane->crtc = NULL;
-}
-EXPORT_SYMBOL(drm_plane_force_disable);
-
 int drm_modeset_register_all(struct drm_device *dev)
 {
        int ret;
@@ -855,395 +610,58 @@ int drm_mode_getcrtc(struct drm_device *dev,
 }
 
 /**
- * drm_mode_getplane_res - enumerate all plane resources
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a list of plane ids to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv)
-{
-       struct drm_mode_get_plane_res *plane_resp = data;
-       struct drm_mode_config *config;
-       struct drm_plane *plane;
-       uint32_t __user *plane_ptr;
-       int copied = 0;
-       unsigned num_planes;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       config = &dev->mode_config;
-
-       if (file_priv->universal_planes)
-               num_planes = config->num_total_plane;
-       else
-               num_planes = config->num_overlay_plane;
-
-       /*
-        * This ioctl is called twice, once to determine how much space is
-        * needed, and the 2nd time to fill it.
-        */
-       if (num_planes &&
-           (plane_resp->count_planes >= num_planes)) {
-               plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
-
-               /* Plane lists are invariant, no locking needed. */
-               drm_for_each_plane(plane, dev) {
-                       /*
-                        * Unless userspace set the 'universal planes'
-                        * capability bit, only advertise overlays.
-                        */
-                       if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
-                           !file_priv->universal_planes)
-                               continue;
-
-                       if (put_user(plane->base.id, plane_ptr + copied))
-                               return -EFAULT;
-                       copied++;
-               }
-       }
-       plane_resp->count_planes = num_planes;
-
-       return 0;
-}
-
-/**
- * drm_mode_getplane - get plane configuration
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a plane configuration structure to return to the user.
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
  *
- * Called by the user via ioctl.
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is the correct refcounting dance.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_getplane(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
+int drm_mode_set_config_internal(struct drm_mode_set *set)
 {
-       struct drm_mode_get_plane *plane_resp = data;
-       struct drm_plane *plane;
-       uint32_t __user *format_ptr;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       plane = drm_plane_find(dev, plane_resp->plane_id);
-       if (!plane)
-               return -ENOENT;
-
-       drm_modeset_lock(&plane->mutex, NULL);
-       if (plane->crtc)
-               plane_resp->crtc_id = plane->crtc->base.id;
-       else
-               plane_resp->crtc_id = 0;
-
-       if (plane->fb)
-               plane_resp->fb_id = plane->fb->base.id;
-       else
-               plane_resp->fb_id = 0;
-       drm_modeset_unlock(&plane->mutex);
-
-       plane_resp->plane_id = plane->base.id;
-       plane_resp->possible_crtcs = plane->possible_crtcs;
-       plane_resp->gamma_size = 0;
+       struct drm_crtc *crtc = set->crtc;
+       struct drm_framebuffer *fb;
+       struct drm_crtc *tmp;
+       int ret;
 
        /*
-        * This ioctl is called twice, once to determine how much space is
-        * needed, and the 2nd time to fill it.
+        * NOTE: ->set_config can also disable other crtcs (if we steal all
+        * connectors from them), hence we need to refcount the fbs across all
+        * crtcs. Atomic modeset will have saner semantics ...
         */
-       if (plane->format_count &&
-           (plane_resp->count_format_types >= plane->format_count)) {
-               format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
-               if (copy_to_user(format_ptr,
-                                plane->format_types,
-                                sizeof(uint32_t) * plane->format_count)) {
-                       return -EFAULT;
-               }
-       }
-       plane_resp->count_format_types = plane->format_count;
-
-       return 0;
-}
-
-/**
- * drm_plane_check_pixel_format - Check if the plane supports the pixel format
- * @plane: plane to check for format support
- * @format: the pixel format
- *
- * Returns:
- * Zero of @plane has @format in its list of supported pixel formats, -EINVAL
- * otherwise.
- */
-int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
-{
-       unsigned int i;
-
-       for (i = 0; i < plane->format_count; i++) {
-               if (format == plane->format_types[i])
-                       return 0;
-       }
-
-       return -EINVAL;
-}
-
-static int check_src_coords(uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h,
-                           const struct drm_framebuffer *fb)
-{
-       unsigned int fb_width, fb_height;
-
-       fb_width = fb->width << 16;
-       fb_height = fb->height << 16;
-
-       /* Make sure source coordinates are inside the fb. */
-       if (src_w > fb_width ||
-           src_x > fb_width - src_w ||
-           src_h > fb_height ||
-           src_y > fb_height - src_h) {
-               DRM_DEBUG_KMS("Invalid source coordinates "
-                             "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-                             src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
-                             src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
-                             src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
-                             src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
-               return -ENOSPC;
-       }
-
-       return 0;
-}
-
-/*
- * setplane_internal - setplane handler for internal callers
- *
- * Note that we assume an extra reference has already been taken on fb.  If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
- *
- * src_{x,y,w,h} are provided in 16.16 fixed point format
- */
-static int __setplane_internal(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int32_t crtc_x, int32_t crtc_y,
-                              uint32_t crtc_w, uint32_t crtc_h,
-                              /* src_{x,y,w,h} values are 16.16 fixed point */
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h)
-{
-       int ret = 0;
-
-       /* No fb means shut it down */
-       if (!fb) {
-               plane->old_fb = plane->fb;
-               ret = plane->funcs->disable_plane(plane);
-               if (!ret) {
-                       plane->crtc = NULL;
-                       plane->fb = NULL;
-               } else {
-                       plane->old_fb = NULL;
-               }
-               goto out;
-       }
-
-       /* Check whether this plane is usable on this CRTC */
-       if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
-               DRM_DEBUG_KMS("Invalid crtc for plane\n");
-               ret = -EINVAL;
-               goto out;
-       }
+       drm_for_each_crtc(tmp, crtc->dev)
+               tmp->primary->old_fb = tmp->primary->fb;
 
-       /* Check whether this plane supports the fb pixel format. */
-       ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
-       if (ret) {
-               char *format_name = drm_get_format_name(fb->pixel_format);
-               DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
-               kfree(format_name);
-               goto out;
-       }
+       fb = set->fb;
 
-       /* Give drivers some help against integer overflows */
-       if (crtc_w > INT_MAX ||
-           crtc_x > INT_MAX - (int32_t) crtc_w ||
-           crtc_h > INT_MAX ||
-           crtc_y > INT_MAX - (int32_t) crtc_h) {
-               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-                             crtc_w, crtc_h, crtc_x, crtc_y);
-               ret = -ERANGE;
-               goto out;
+       ret = crtc->funcs->set_config(set);
+       if (ret == 0) {
+               crtc->primary->crtc = crtc;
+               crtc->primary->fb = fb;
        }
 
-       ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
-       if (ret)
-               goto out;
-
-       plane->old_fb = plane->fb;
-       ret = plane->funcs->update_plane(plane, crtc, fb,
-                                        crtc_x, crtc_y, crtc_w, crtc_h,
-                                        src_x, src_y, src_w, src_h);
-       if (!ret) {
-               plane->crtc = crtc;
-               plane->fb = fb;
-               fb = NULL;
-       } else {
-               plane->old_fb = NULL;
+       drm_for_each_crtc(tmp, crtc->dev) {
+               if (tmp->primary->fb)
+                       drm_framebuffer_reference(tmp->primary->fb);
+               if (tmp->primary->old_fb)
+                       drm_framebuffer_unreference(tmp->primary->old_fb);
+               tmp->primary->old_fb = NULL;
        }
 
-out:
-       if (fb)
-               drm_framebuffer_unreference(fb);
-       if (plane->old_fb)
-               drm_framebuffer_unreference(plane->old_fb);
-       plane->old_fb = NULL;
-
-       return ret;
-}
-
-static int setplane_internal(struct drm_plane *plane,
-                            struct drm_crtc *crtc,
-                            struct drm_framebuffer *fb,
-                            int32_t crtc_x, int32_t crtc_y,
-                            uint32_t crtc_w, uint32_t crtc_h,
-                            /* src_{x,y,w,h} values are 16.16 fixed point */
-                            uint32_t src_x, uint32_t src_y,
-                            uint32_t src_w, uint32_t src_h)
-{
-       int ret;
-
-       drm_modeset_lock_all(plane->dev);
-       ret = __setplane_internal(plane, crtc, fb,
-                                 crtc_x, crtc_y, crtc_w, crtc_h,
-                                 src_x, src_y, src_w, src_h);
-       drm_modeset_unlock_all(plane->dev);
-
        return ret;
 }
+EXPORT_SYMBOL(drm_mode_set_config_internal);
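A minimal sketch of an in-kernel caller (hypothetical crtc/mode/fb/connector
values; like drm_mode_setcrtc() below, the caller must already hold the
modeset locks):

    struct drm_mode_set set = {
            .crtc = crtc,
            .x = 0,
            .y = 0,
            .mode = mode,
            .connectors = &connector,
            .num_connectors = 1,
            .fb = fb,
    };

    ret = drm_mode_set_config_internal(&set);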
 
 /**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
- *
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable (planes may be disabled without providing a
- * valid crtc).
+ * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
  *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_setplane(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
-{
-       struct drm_mode_set_plane *plane_req = data;
-       struct drm_plane *plane;
-       struct drm_crtc *crtc = NULL;
-       struct drm_framebuffer *fb = NULL;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       /*
-        * First, find the plane, crtc, and fb objects.  If not available,
-        * we don't bother to call the driver.
-        */
-       plane = drm_plane_find(dev, plane_req->plane_id);
-       if (!plane) {
-               DRM_DEBUG_KMS("Unknown plane ID %d\n",
-                             plane_req->plane_id);
-               return -ENOENT;
-       }
-
-       if (plane_req->fb_id) {
-               fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
-               if (!fb) {
-                       DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
-                                     plane_req->fb_id);
-                       return -ENOENT;
-               }
-
-               crtc = drm_crtc_find(dev, plane_req->crtc_id);
-               if (!crtc) {
-                       DRM_DEBUG_KMS("Unknown crtc ID %d\n",
-                                     plane_req->crtc_id);
-                       return -ENOENT;
-               }
-       }
-
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
-       return setplane_internal(plane, crtc, fb,
-                                plane_req->crtc_x, plane_req->crtc_y,
-                                plane_req->crtc_w, plane_req->crtc_h,
-                                plane_req->src_x, plane_req->src_y,
-                                plane_req->src_w, plane_req->src_h);
-}
-
-/**
- * drm_mode_set_config_internal - helper to call ->set_config
- * @set: modeset config to set
- *
- * This is a little helper to wrap internal calls to the ->set_config driver
- * interface. The only thing it adds is correct refcounting dance.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_set_config_internal(struct drm_mode_set *set)
-{
-       struct drm_crtc *crtc = set->crtc;
-       struct drm_framebuffer *fb;
-       struct drm_crtc *tmp;
-       int ret;
-
-       /*
-        * NOTE: ->set_config can also disable other crtcs (if we steal all
-        * connectors from it), hence we need to refcount the fbs across all
-        * crtcs. Atomic modeset will have saner semantics ...
-        */
-       drm_for_each_crtc(tmp, crtc->dev)
-               tmp->primary->old_fb = tmp->primary->fb;
-
-       fb = set->fb;
-
-       ret = crtc->funcs->set_config(set);
-       if (ret == 0) {
-               crtc->primary->crtc = crtc;
-               crtc->primary->fb = fb;
-       }
-
-       drm_for_each_crtc(tmp, crtc->dev) {
-               if (tmp->primary->fb)
-                       drm_framebuffer_reference(tmp->primary->fb);
-               if (tmp->primary->old_fb)
-                       drm_framebuffer_unreference(tmp->primary->old_fb);
-               tmp->primary->old_fb = NULL;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_set_config_internal);
-
-/**
- * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
- * @mode: mode to query
- * @hdisplay: hdisplay value to fill in
- * @vdisplay: vdisplay value to fill in
- *
- * The vdisplay value will be doubled if the specified mode is a stereo mode of
- * the appropriate layout.
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
  */
 void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
                            int *hdisplay, int *vdisplay)
@@ -1251,807 +669,236 @@ void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
        struct drm_display_mode adjusted;
 
        drm_mode_copy(&adjusted, mode);
-       drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
-       *hdisplay = adjusted.crtc_hdisplay;
-       *vdisplay = adjusted.crtc_vdisplay;
-}
-EXPORT_SYMBOL(drm_crtc_get_hv_timing);
-
-/**
- * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
- *     CRTC viewport
- * @crtc: CRTC that framebuffer will be displayed on
- * @x: x panning
- * @y: y panning
- * @mode: mode that framebuffer will be displayed under
- * @fb: framebuffer to check size of
- */
-int drm_crtc_check_viewport(const struct drm_crtc *crtc,
-                           int x, int y,
-                           const struct drm_display_mode *mode,
-                           const struct drm_framebuffer *fb)
-
-{
-       int hdisplay, vdisplay;
-
-       drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
-
-       if (crtc->state &&
-           crtc->primary->state->rotation & (DRM_ROTATE_90 |
-                                             DRM_ROTATE_270))
-               swap(hdisplay, vdisplay);
-
-       return check_src_coords(x << 16, y << 16,
-                               hdisplay << 16, vdisplay << 16, fb);
-}
-EXPORT_SYMBOL(drm_crtc_check_viewport);
-
-/**
- * drm_mode_setcrtc - set CRTC configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Build a new CRTC configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_setcrtc(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_mode_crtc *crtc_req = data;
-       struct drm_crtc *crtc;
-       struct drm_connector **connector_set = NULL, *connector;
-       struct drm_framebuffer *fb = NULL;
-       struct drm_display_mode *mode = NULL;
-       struct drm_mode_set set;
-       uint32_t __user *set_connectors_ptr;
-       int ret;
-       int i;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       /*
-        * Universal plane src offsets are only 16.16, prevent havoc for
-        * drivers using universal plane code internally.
-        */
-       if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
-               return -ERANGE;
-
-       drm_modeset_lock_all(dev);
-       crtc = drm_crtc_find(dev, crtc_req->crtc_id);
-       if (!crtc) {
-               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-               ret = -ENOENT;
-               goto out;
-       }
-       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
-
-       if (crtc_req->mode_valid) {
-               /* If we have a mode we need a framebuffer. */
-               /* If we pass -1, set the mode with the currently bound fb */
-               if (crtc_req->fb_id == -1) {
-                       if (!crtc->primary->fb) {
-                               DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
-                               ret = -EINVAL;
-                               goto out;
-                       }
-                       fb = crtc->primary->fb;
-                       /* Make refcounting symmetric with the lookup path. */
-                       drm_framebuffer_reference(fb);
-               } else {
-                       fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
-                       if (!fb) {
-                               DRM_DEBUG_KMS("Unknown FB ID%d\n",
-                                               crtc_req->fb_id);
-                               ret = -ENOENT;
-                               goto out;
-                       }
-               }
-
-               mode = drm_mode_create(dev);
-               if (!mode) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               ret = drm_mode_convert_umode(mode, &crtc_req->mode);
-               if (ret) {
-                       DRM_DEBUG_KMS("Invalid mode\n");
-                       goto out;
-               }
-
-               /*
-                * Check whether the primary plane supports the fb pixel format.
-                * Drivers not implementing the universal planes API use a
-                * default formats list provided by the DRM core which doesn't
-                * match real hardware capabilities. Skip the check in that
-                * case.
-                */
-               if (!crtc->primary->format_default) {
-                       ret = drm_plane_check_pixel_format(crtc->primary,
-                                                          fb->pixel_format);
-                       if (ret) {
-                               char *format_name = drm_get_format_name(fb->pixel_format);
-                               DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
-                               kfree(format_name);
-                               goto out;
-                       }
-               }
-
-               ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
-                                             mode, fb);
-               if (ret)
-                       goto out;
-
-       }
-
-       if (crtc_req->count_connectors == 0 && mode) {
-               DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
-               DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
-                         crtc_req->count_connectors);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (crtc_req->count_connectors > 0) {
-               u32 out_id;
-
-               /* Avoid unbounded kernel memory allocation */
-               if (crtc_req->count_connectors > config->num_connector) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               connector_set = kmalloc_array(crtc_req->count_connectors,
-                                             sizeof(struct drm_connector *),
-                                             GFP_KERNEL);
-               if (!connector_set) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               for (i = 0; i < crtc_req->count_connectors; i++) {
-                       connector_set[i] = NULL;
-                       set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
-                       if (get_user(out_id, &set_connectors_ptr[i])) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-
-                       connector = drm_connector_lookup(dev, out_id);
-                       if (!connector) {
-                               DRM_DEBUG_KMS("Connector id %d unknown\n",
-                                               out_id);
-                               ret = -ENOENT;
-                               goto out;
-                       }
-                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                                       connector->base.id,
-                                       connector->name);
-
-                       connector_set[i] = connector;
-               }
-       }
-
-       set.crtc = crtc;
-       set.x = crtc_req->x;
-       set.y = crtc_req->y;
-       set.mode = mode;
-       set.connectors = connector_set;
-       set.num_connectors = crtc_req->count_connectors;
-       set.fb = fb;
-       ret = drm_mode_set_config_internal(&set);
-
-out:
-       if (fb)
-               drm_framebuffer_unreference(fb);
-
-       if (connector_set) {
-               for (i = 0; i < crtc_req->count_connectors; i++) {
-                       if (connector_set[i])
-                               drm_connector_unreference(connector_set[i]);
-               }
-       }
-       kfree(connector_set);
-       drm_mode_destroy(dev, mode);
-       drm_modeset_unlock_all(dev);
-       return ret;
-}
-
-/**
- * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
- *     universal plane handler call
- * @crtc: crtc to update cursor for
- * @req: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Legacy cursor ioctl's work directly with driver buffer handles.  To
- * translate legacy ioctl calls into universal plane handler calls, we need to
- * wrap the native buffer handle in a drm_framebuffer.
- *
- * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
- * buffer with a pitch of 4*width; the universal plane interface should be used
- * directly in cases where the hardware can support other buffer settings and
- * userspace wants to make use of these capabilities.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-static int drm_mode_cursor_universal(struct drm_crtc *crtc,
-                                    struct drm_mode_cursor2 *req,
-                                    struct drm_file *file_priv)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_framebuffer *fb = NULL;
-       struct drm_mode_fb_cmd2 fbreq = {
-               .width = req->width,
-               .height = req->height,
-               .pixel_format = DRM_FORMAT_ARGB8888,
-               .pitches = { req->width * 4 },
-               .handles = { req->handle },
-       };
-       int32_t crtc_x, crtc_y;
-       uint32_t crtc_w = 0, crtc_h = 0;
-       uint32_t src_w = 0, src_h = 0;
-       int ret = 0;
-
-       BUG_ON(!crtc->cursor);
-       WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
-
-       /*
-        * Obtain fb we'll be using (either new or existing) and take an extra
-        * reference to it if fb != null.  setplane will take care of dropping
-        * the reference if the plane update fails.
-        */
-       if (req->flags & DRM_MODE_CURSOR_BO) {
-               if (req->handle) {
-                       fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv);
-                       if (IS_ERR(fb)) {
-                               DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
-                               return PTR_ERR(fb);
-                       }
-                       fb->hot_x = req->hot_x;
-                       fb->hot_y = req->hot_y;
-               } else {
-                       fb = NULL;
-               }
-       } else {
-               fb = crtc->cursor->fb;
-               if (fb)
-                       drm_framebuffer_reference(fb);
-       }
-
-       if (req->flags & DRM_MODE_CURSOR_MOVE) {
-               crtc_x = req->x;
-               crtc_y = req->y;
-       } else {
-               crtc_x = crtc->cursor_x;
-               crtc_y = crtc->cursor_y;
-       }
-
-       if (fb) {
-               crtc_w = fb->width;
-               crtc_h = fb->height;
-               src_w = fb->width << 16;
-               src_h = fb->height << 16;
-       }
-
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
-       ret = __setplane_internal(crtc->cursor, crtc, fb,
-                               crtc_x, crtc_y, crtc_w, crtc_h,
-                               0, 0, src_w, src_h);
-
-       /* Update successful; save new cursor position, if necessary */
-       if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
-               crtc->cursor_x = req->x;
-               crtc->cursor_y = req->y;
-       }
-
-       return ret;
-}
-
-static int drm_mode_cursor_common(struct drm_device *dev,
-                                 struct drm_mode_cursor2 *req,
-                                 struct drm_file *file_priv)
-{
-       struct drm_crtc *crtc;
-       int ret = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
-               return -EINVAL;
-
-       crtc = drm_crtc_find(dev, req->crtc_id);
-       if (!crtc) {
-               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-               return -ENOENT;
-       }
-
-       /*
-        * If this crtc has a universal cursor plane, call that plane's update
-        * handler rather than using legacy cursor handlers.
-        */
-       drm_modeset_lock_crtc(crtc, crtc->cursor);
-       if (crtc->cursor) {
-               ret = drm_mode_cursor_universal(crtc, req, file_priv);
-               goto out;
-       }
-
-       if (req->flags & DRM_MODE_CURSOR_BO) {
-               if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
-                       ret = -ENXIO;
-                       goto out;
-               }
-               /* Turns off the cursor if handle is 0 */
-               if (crtc->funcs->cursor_set2)
-                       ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
-                                                     req->width, req->height, req->hot_x, req->hot_y);
-               else
-                       ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
-                                                     req->width, req->height);
-       }
-
-       if (req->flags & DRM_MODE_CURSOR_MOVE) {
-               if (crtc->funcs->cursor_move) {
-                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
-               } else {
-                       ret = -EFAULT;
-                       goto out;
-               }
-       }
-out:
-       drm_modeset_unlock_crtc(crtc);
-
-       return ret;
-
-}
-
-
-/**
- * drm_mode_cursor_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_cursor *req = data;
-       struct drm_mode_cursor2 new_req;
-
-       memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
-       new_req.hot_x = new_req.hot_y = 0;
-
-       return drm_mode_cursor_common(dev, &new_req, file_priv);
-}
-
-/**
- * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request. This implements the 2nd
- * version of the cursor ioctl, which allows userspace to additionally specify
- * the hotspot of the pointer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_cursor2 *req = data;
-
-       return drm_mode_cursor_common(dev, req, file_priv);
-}
-
-int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
-                              struct drm_property *property,
-                              uint64_t value)
-{
-       int ret = -EINVAL;
-       struct drm_crtc *crtc = obj_to_crtc(obj);
-
-       if (crtc->funcs->set_property)
-               ret = crtc->funcs->set_property(crtc, property, value);
-       if (!ret)
-               drm_object_property_set_value(obj, property, value);
-
-       return ret;
-}
-
-/**
- * drm_mode_plane_set_obj_prop - set the value of a property
- * @plane: drm plane object to set property value for
- * @property: property to set
- * @value: value the property should be set to
- *
- * This functions sets a given property on a given plane object. This function
- * calls the driver's ->set_property callback and changes the software state of
- * the property if the callback succeeds.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
-                               struct drm_property *property,
-                               uint64_t value)
-{
-       int ret = -EINVAL;
-       struct drm_mode_object *obj = &plane->base;
-
-       if (plane->funcs->set_property)
-               ret = plane->funcs->set_property(plane, property, value);
-       if (!ret)
-               drm_object_property_set_value(obj, property, value);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
-
-/**
- * drm_mode_crtc_set_gamma_size - set the gamma table size
- * @crtc: CRTC to set the gamma table size for
- * @gamma_size: size of the gamma table
- *
- * Drivers which support gamma tables should set this to the supported gamma
- * table size when initializing the CRTC. Currently the drm core only supports a
- * fixed gamma table size.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
-                                int gamma_size)
-{
-       uint16_t *r_base, *g_base, *b_base;
-       int i;
-
-       crtc->gamma_size = gamma_size;
-
-       crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
-                                   GFP_KERNEL);
-       if (!crtc->gamma_store) {
-               crtc->gamma_size = 0;
-               return -ENOMEM;
-       }
-
-       r_base = crtc->gamma_store;
-       g_base = r_base + gamma_size;
-       b_base = g_base + gamma_size;
-       for (i = 0; i < gamma_size; i++) {
-               r_base[i] = i << 8;
-               g_base[i] = i << 8;
-               b_base[i] = i << 8;
-       }
-
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
-
-/**
- * drm_mode_gamma_set_ioctl - set the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
- * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_crtc_lut *crtc_lut = data;
-       struct drm_crtc *crtc;
-       void *r_base, *g_base, *b_base;
-       int size;
-       int ret = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
-       drm_modeset_lock_all(dev);
-       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
-       if (!crtc) {
-               ret = -ENOENT;
-               goto out;
-       }
-
-       if (crtc->funcs->gamma_set == NULL) {
-               ret = -ENOSYS;
-               goto out;
-       }
-
-       /* memcpy into gamma store */
-       if (crtc_lut->gamma_size != crtc->gamma_size) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       size = crtc_lut->gamma_size * (sizeof(uint16_t));
-       r_base = crtc->gamma_store;
-       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       g_base = r_base + size;
-       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       b_base = g_base + size;
-       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
-
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
-
-}
-
-/**
- * drm_mode_gamma_get_ioctl - get the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Copy the current gamma table into the storage provided. This also provides
- * the gamma table size the driver expects, which can be used to size the
- * allocated storage.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv)
-{
-       struct drm_mode_crtc_lut *crtc_lut = data;
-       struct drm_crtc *crtc;
-       void *r_base, *g_base, *b_base;
-       int size;
-       int ret = 0;
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
+       drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+       *hdisplay = adjusted.crtc_hdisplay;
+       *vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_crtc_get_hv_timing);
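A sketch of the intended use (mode assumed valid; for a frame-packed stereo
mode the returned vdisplay covers both eyes):

    int hdisplay, vdisplay;

    drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
    /* hdisplay x vdisplay is the active area a framebuffer must cover */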
 
-       drm_modeset_lock_all(dev);
-       crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
-       if (!crtc) {
-               ret = -ENOENT;
-               goto out;
-       }
+/**
+ * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
+ *     CRTC viewport
+ * @crtc: CRTC that framebuffer will be displayed on
+ * @x: x panning
+ * @y: y panning
+ * @mode: mode that framebuffer will be displayed under
+ * @fb: framebuffer to check size of
+ */
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+                           int x, int y,
+                           const struct drm_display_mode *mode,
+                           const struct drm_framebuffer *fb)
 
-       /* memcpy into gamma store */
-       if (crtc_lut->gamma_size != crtc->gamma_size) {
-               ret = -EINVAL;
-               goto out;
-       }
+{
+       int hdisplay, vdisplay;
 
-       size = crtc_lut->gamma_size * (sizeof(uint16_t));
-       r_base = crtc->gamma_store;
-       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+       drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
 
-       g_base = r_base + size;
-       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+       if (crtc->state &&
+           crtc->primary->state->rotation & (DRM_ROTATE_90 |
+                                             DRM_ROTATE_270))
+               swap(hdisplay, vdisplay);
 
-       b_base = g_base + size;
-       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
-               ret = -EFAULT;
-               goto out;
-       }
-out:
-       drm_modeset_unlock_all(dev);
-       return ret;
+       return drm_framebuffer_check_src_coords(x << 16, y << 16,
+                                               hdisplay << 16, vdisplay << 16,
+                                               fb);
 }
+EXPORT_SYMBOL(drm_crtc_check_viewport);
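The shifts above convert integer pixel offsets into the 16.16 fixed-point
format used for plane source coordinates: x = 5 becomes 5 << 16 = 327680,
i.e. 5.0. A sketch of a typical call from a legacy modeset path (crtc, mode
and fb assumed valid and locked):

    ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
    if (ret)
            return ret;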
 
 /**
- * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
  *
- * This schedules an asynchronous update on a given CRTC, called page flip.
- * Optionally a drm event is generated to signal the completion of the event.
- * Generic drivers cannot assume that a pageflip with changed framebuffer
- * properties (including driver specific metadata like tiling layout) will work,
- * but some drivers support e.g. pixel format changes through the pageflip
- * ioctl.
+ * Build a new CRTC configuration based on user request.
  *
  * Called by the user via ioctl.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv)
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
 {
-       struct drm_mode_crtc_page_flip_target *page_flip = data;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_mode_crtc *crtc_req = data;
        struct drm_crtc *crtc;
+       struct drm_connector **connector_set = NULL, *connector;
        struct drm_framebuffer *fb = NULL;
-       struct drm_pending_vblank_event *e = NULL;
-       u32 target_vblank = page_flip->sequence;
-       int ret = -EINVAL;
-
-       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
-               return -EINVAL;
+       struct drm_display_mode *mode = NULL;
+       struct drm_mode_set set;
+       uint32_t __user *set_connectors_ptr;
+       int ret;
+       int i;
 
-       if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET))
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags
-        * can be specified
+       /*
+        * Universal plane src offsets are only 16.16, prevent havoc for
+        * drivers using universal plane code internally.
         */
-       if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET)
-               return -EINVAL;
-
-       if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
-               return -EINVAL;
+       if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
+               return -ERANGE;
 
-       crtc = drm_crtc_find(dev, page_flip->crtc_id);
-       if (!crtc)
-               return -ENOENT;
+       drm_modeset_lock_all(dev);
+       crtc = drm_crtc_find(dev, crtc_req->crtc_id);
+       if (!crtc) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+               ret = -ENOENT;
+               goto out;
+       }
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
-       if (crtc->funcs->page_flip_target) {
-               u32 current_vblank;
-               int r;
+       if (crtc_req->mode_valid) {
+               /* If we have a mode we need a framebuffer. */
+               /* If we pass -1, set the mode with the currently bound fb */
+               if (crtc_req->fb_id == -1) {
+                       if (!crtc->primary->fb) {
+                               DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       fb = crtc->primary->fb;
+                       /* Make refcounting symmetric with the lookup path. */
+                       drm_framebuffer_reference(fb);
+               } else {
+                       fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+                       if (!fb) {
+                               DRM_DEBUG_KMS("Unknown FB ID%d\n",
+                                               crtc_req->fb_id);
+                               ret = -ENOENT;
+                               goto out;
+                       }
+               }
 
-               r = drm_crtc_vblank_get(crtc);
-               if (r)
-                       return r;
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
-               current_vblank = drm_crtc_vblank_count(crtc);
+               ret = drm_mode_convert_umode(mode, &crtc_req->mode);
+               if (ret) {
+                       DRM_DEBUG_KMS("Invalid mode\n");
+                       goto out;
+               }
 
-               switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
-               case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
-                       if ((int)(target_vblank - current_vblank) > 1) {
-                               DRM_DEBUG("Invalid absolute flip target %u, "
-                                         "must be <= %u\n", target_vblank,
-                                         current_vblank + 1);
-                               drm_crtc_vblank_put(crtc);
-                               return -EINVAL;
-                       }
-                       break;
-               case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE:
-                       if (target_vblank != 0 && target_vblank != 1) {
-                               DRM_DEBUG("Invalid relative flip target %u, "
-                                         "must be 0 or 1\n", target_vblank);
-                               drm_crtc_vblank_put(crtc);
-                               return -EINVAL;
+               /*
+                * Check whether the primary plane supports the fb pixel format.
+                * Drivers not implementing the universal planes API use a
+                * default formats list provided by the DRM core which doesn't
+                * match real hardware capabilities. Skip the check in that
+                * case.
+                */
+               if (!crtc->primary->format_default) {
+                       ret = drm_plane_check_pixel_format(crtc->primary,
+                                                          fb->pixel_format);
+                       if (ret) {
+                               char *format_name = drm_get_format_name(fb->pixel_format);
+                               DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+                               kfree(format_name);
+                               goto out;
                        }
-                       target_vblank += current_vblank;
-                       break;
-               default:
-                       target_vblank = current_vblank +
-                               !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC);
-                       break;
                }
-       } else if (crtc->funcs->page_flip == NULL ||
-                  (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) {
-               return -EINVAL;
-       }
 
-       drm_modeset_lock_crtc(crtc, crtc->primary);
-       if (crtc->primary->fb == NULL) {
-               /* The framebuffer is currently unbound, presumably
-                * due to a hotplug event, that userspace has not
-                * yet discovered.
-                */
-               ret = -EBUSY;
-               goto out;
-       }
+               ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+                                             mode, fb);
+               if (ret)
+                       goto out;
 
-       fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
-       if (!fb) {
-               ret = -ENOENT;
-               goto out;
        }
 
-       if (crtc->state) {
-               const struct drm_plane_state *state = crtc->primary->state;
-
-               ret = check_src_coords(state->src_x, state->src_y,
-                                      state->src_w, state->src_h, fb);
-       } else {
-               ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
-       }
-       if (ret)
+       if (crtc_req->count_connectors == 0 && mode) {
+               DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+               ret = -EINVAL;
                goto out;
+       }
 
-       if (crtc->primary->fb->pixel_format != fb->pixel_format) {
-               DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+       if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+               DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+                         crtc_req->count_connectors);
                ret = -EINVAL;
                goto out;
        }
 
-       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-               e = kzalloc(sizeof *e, GFP_KERNEL);
-               if (!e) {
-                       ret = -ENOMEM;
+       if (crtc_req->count_connectors > 0) {
+               u32 out_id;
+
+               /* Avoid unbounded kernel memory allocation */
+               if (crtc_req->count_connectors > config->num_connector) {
+                       ret = -EINVAL;
                        goto out;
                }
-               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
-               e->event.base.length = sizeof(e->event);
-               e->event.user_data = page_flip->user_data;
-               ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
-               if (ret) {
-                       kfree(e);
+
+               connector_set = kmalloc_array(crtc_req->count_connectors,
+                                             sizeof(struct drm_connector *),
+                                             GFP_KERNEL);
+               if (!connector_set) {
+                       ret = -ENOMEM;
                        goto out;
                }
-       }
 
-       crtc->primary->old_fb = crtc->primary->fb;
-       if (crtc->funcs->page_flip_target)
-               ret = crtc->funcs->page_flip_target(crtc, fb, e,
-                                                   page_flip->flags,
-                                                   target_vblank);
-       else
-               ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
-       if (ret) {
-               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
-                       drm_event_cancel_free(dev, &e->base);
-               /* Keep the old fb, don't unref it. */
-               crtc->primary->old_fb = NULL;
-       } else {
-               crtc->primary->fb = fb;
-               /* Unref only the old framebuffer. */
-               fb = NULL;
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       connector_set[i] = NULL;
+                       set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+                       if (get_user(out_id, &set_connectors_ptr[i])) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+
+                       connector = drm_connector_lookup(dev, out_id);
+                       if (!connector) {
+                               DRM_DEBUG_KMS("Connector id %d unknown\n",
+                                               out_id);
+                               ret = -ENOENT;
+                               goto out;
+                       }
+                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                                       connector->base.id,
+                                       connector->name);
+
+                       connector_set[i] = connector;
+               }
        }
 
+       set.crtc = crtc;
+       set.x = crtc_req->x;
+       set.y = crtc_req->y;
+       set.mode = mode;
+       set.connectors = connector_set;
+       set.num_connectors = crtc_req->count_connectors;
+       set.fb = fb;
+       ret = drm_mode_set_config_internal(&set);
+
 out:
-       if (ret && crtc->funcs->page_flip_target)
-               drm_crtc_vblank_put(crtc);
        if (fb)
                drm_framebuffer_unreference(fb);
-       if (crtc->primary->old_fb)
-               drm_framebuffer_unreference(crtc->primary->old_fb);
-       crtc->primary->old_fb = NULL;
-       drm_modeset_unlock_crtc(crtc);
+
+       if (connector_set) {
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       if (connector_set[i])
+                               drm_connector_unreference(connector_set[i]);
+               }
+       }
+       kfree(connector_set);
+       drm_mode_destroy(dev, mode);
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+
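The rewritten drm_mode_setcrtc() above is the kernel side of the legacy modeset path: look up the CRTC, optionally convert the user-supplied mode and framebuffer, validate the viewport and connector list, then hand everything to drm_mode_set_config_internal(). From userspace this is normally reached through libdrm; a minimal sketch, with the resource indices and the framebuffer ID illustrative rather than taken from this patch:

    /* Sketch: drive DRM_IOCTL_MODE_SETCRTC via libdrm.  fb_id must be a
     * framebuffer previously created with drmModeAddFB(); using index 0
     * for the CRTC and connector is purely illustrative. */
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    int set_first_mode(int fd, uint32_t fb_id)
    {
            drmModeRes *res = drmModeGetResources(fd);
            drmModeConnector *conn = drmModeGetConnector(fd, res->connectors[0]);
            uint32_t conn_id = conn->connector_id;
            int ret;

            /* Ends up in drm_mode_setcrtc() with count_connectors == 1. */
            ret = drmModeSetCrtc(fd, res->crtcs[0], fb_id, 0, 0,
                                 &conn_id, 1, &conn->modes[0]);

            drmModeFreeConnector(conn);
            drmModeFreeResources(res);
            return ret;
    }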
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+                              struct drm_property *property,
+                              uint64_t value)
+{
+       int ret = -EINVAL;
+       struct drm_crtc *crtc = obj_to_crtc(obj);
+
+       if (crtc->funcs->set_property)
+               ret = crtc->funcs->set_property(crtc, property, value);
+       if (!ret)
+               drm_object_property_set_value(obj, property, value);
 
        return ret;
 }
@@ -2200,37 +1047,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
        return dev->driver->dumb_destroy(file_priv, dev, args->handle);
 }
 
-/**
- * drm_rotation_simplify() - Try to simplify the rotation
- * @rotation: Rotation to be simplified
- * @supported_rotations: Supported rotations
- *
- * Attempt to simplify the rotation to a form that is supported.
- * Eg. if the hardware supports everything except DRM_REFLECT_X
- * one could call this function like this:
- *
- * drm_rotation_simplify(rotation, DRM_ROTATE_0 |
- *                       DRM_ROTATE_90 | DRM_ROTATE_180 |
- *                       DRM_ROTATE_270 | DRM_REFLECT_Y);
- *
- * to eliminate the DRM_ROTATE_X flag. Depending on what kind of
- * transforms the hardware supports, this function may not
- * be able to produce a supported transform, so the caller should
- * check the result afterwards.
- */
-unsigned int drm_rotation_simplify(unsigned int rotation,
-                                  unsigned int supported_rotations)
-{
-       if (rotation & ~supported_rotations) {
-               rotation ^= DRM_REFLECT_X | DRM_REFLECT_Y;
-               rotation = (rotation & DRM_REFLECT_MASK) |
-                          BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
-       }
-
-       return rotation;
-}
-EXPORT_SYMBOL(drm_rotation_simplify);
-
 /**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
@@ -2347,24 +1163,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
 
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-                                                      unsigned int supported_rotations)
-{
-       static const struct drm_prop_enum_list props[] = {
-               { __builtin_ffs(DRM_ROTATE_0) - 1,   "rotate-0" },
-               { __builtin_ffs(DRM_ROTATE_90) - 1,  "rotate-90" },
-               { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
-               { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
-               { __builtin_ffs(DRM_REFLECT_X) - 1,  "reflect-x" },
-               { __builtin_ffs(DRM_REFLECT_Y) - 1,  "reflect-y" },
-       };
-
-       return drm_property_create_bitmask(dev, 0, "rotation",
-                                          props, ARRAY_SIZE(props),
-                                          supported_rotations);
-}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
-
 /**
  * DOC: Tile group
  *
@@ -2463,48 +1261,3 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
        return tg;
 }
 EXPORT_SYMBOL(drm_mode_create_tile_group);
-
-/**
- * drm_crtc_enable_color_mgmt - enable color management properties
- * @crtc: DRM CRTC
- * @degamma_lut_size: the size of the degamma lut (before CSC)
- * @has_ctm: whether to attach ctm_property for CSC matrix
- * @gamma_lut_size: the size of the gamma lut (after CSC)
- *
- * This function lets the driver enable the color correction
- * properties on a CRTC. This includes 3 degamma, csc and gamma
- * properties that userspace can set and 2 size properties to inform
- * the userspace of the lut sizes. Each of the properties are
- * optional. The gamma and degamma properties are only attached if
- * their size is not 0 and ctm_property is only attached if has_ctm is
- * true.
- */
-void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-                               uint degamma_lut_size,
-                               bool has_ctm,
-                               uint gamma_lut_size)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_mode_config *config = &dev->mode_config;
-
-       if (degamma_lut_size) {
-               drm_object_attach_property(&crtc->base,
-                                          config->degamma_lut_property, 0);
-               drm_object_attach_property(&crtc->base,
-                                          config->degamma_lut_size_property,
-                                          degamma_lut_size);
-       }
-
-       if (has_ctm)
-               drm_object_attach_property(&crtc->base,
-                                          config->ctm_property, 0);
-
-       if (gamma_lut_size) {
-               drm_object_attach_property(&crtc->base,
-                                          config->gamma_lut_property, 0);
-               drm_object_attach_property(&crtc->base,
-                                          config->gamma_lut_size_property,
-                                          gamma_lut_size);
-       }
-}
-EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
index 4e6b57a..28295e5 100644 (file)
 #include <drm/drm_dp_helper.h>
 
 /* drm_fb_helper.c */
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 int drm_fb_helper_modinit(void);
+#else
+static inline int drm_fb_helper_modinit(void)
+{
+       return 0;
+}
+#endif
 
 /* drm_dp_aux_dev.c */
 #ifdef CONFIG_DRM_DP_AUX_CHARDEV
index a362264..c48ba02 100644 (file)
@@ -36,8 +36,6 @@
 int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
                               struct drm_property *property,
                               uint64_t value);
-int drm_plane_check_pixel_format(const struct drm_plane *plane,
-                                u32 format);
 int drm_crtc_check_viewport(const struct drm_crtc *crtc,
                            int x, int y,
                            const struct drm_display_mode *mode,
@@ -56,28 +54,19 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
 /* IOCTLs */
 int drm_mode_getresources(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv);
 int drm_mode_getcrtc(struct drm_device *dev,
                     void *data, struct drm_file *file_priv);
 int drm_mode_setcrtc(struct drm_device *dev,
                     void *data, struct drm_file *file_priv);
-int drm_mode_getplane(struct drm_device *dev,
-                     void *data, struct drm_file *file_priv);
-int drm_mode_setplane(struct drm_device *dev,
-                     void *data, struct drm_file *file_priv);
-int drm_mode_cursor_ioctl(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv);
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv);
+
+/* drm_color_mgmt.c */
+
+/* IOCTLs */
 int drm_mode_gamma_get_ioctl(struct drm_device *dev,
                             void *data, struct drm_file *file_priv);
 int drm_mode_gamma_set_ioctl(struct drm_device *dev,
                             void *data, struct drm_file *file_priv);
 
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
-                            void *data, struct drm_file *file_priv);
-
 /* drm_property.c */
 void drm_property_destroy_user_blobs(struct drm_device *dev,
                                     struct drm_file *file_priv);
@@ -115,6 +104,8 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
                                   uint32_t __user *prop_ptr,
                                   uint64_t __user *prop_values,
                                   uint32_t *arg_count_props);
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+                                              uint32_t prop_id);
 
 /* IOCTL */
 
@@ -153,6 +144,9 @@ drm_internal_framebuffer_create(struct drm_device *dev,
                                const struct drm_mode_fb_cmd2 *r,
                                struct drm_file *file_priv);
 void drm_framebuffer_free(struct kref *kref);
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+                                    uint32_t src_w, uint32_t src_h,
+                                    const struct drm_framebuffer *fb);
 
 /* IOCTL */
 int drm_mode_addfb(struct drm_device *dev,
@@ -175,6 +169,23 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
 
-/* drm_blend.c */
-int drm_atomic_normalize_zpos(struct drm_device *dev,
-                             struct drm_atomic_state *state);
+
+/* drm_plane.c */
+int drm_plane_register_all(struct drm_device *dev);
+void drm_plane_unregister_all(struct drm_device *dev);
+int drm_plane_check_pixel_format(const struct drm_plane *plane,
+                                u32 format);
+
+/* IOCTL */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+int drm_mode_getplane(struct drm_device *dev,
+                     void *data, struct drm_file *file_priv);
+int drm_mode_setplane(struct drm_device *dev,
+                     void *data, struct drm_file *file_priv);
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
index 031c4d3..ac3924a 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
+#include <linux/seq_file.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drmP.h>
 
@@ -439,6 +440,179 @@ int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
 }
 EXPORT_SYMBOL(drm_dp_link_configure);
 
+/**
+ * drm_dp_downstream_max_clock() - extract branch device max
+ *                                 pixel rate for legacy VGA
+ *                                 converter or max TMDS clock
+ *                                 rate for others
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max clock in kHz on success or 0 if max clock not defined
+ */
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                               const u8 port_cap[4])
+{
+       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+               DP_DETAILED_CAP_INFO_AVAILABLE;
+
+       if (!detailed_cap_info)
+               return 0;
+
+       switch (type) {
+       case DP_DS_PORT_TYPE_VGA:
+               return port_cap[1] * 8 * 1000;
+       case DP_DS_PORT_TYPE_DVI:
+       case DP_DS_PORT_TYPE_HDMI:
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               return port_cap[1] * 2500;
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_clock);
+
+/**
+ * drm_dp_downstream_max_bpc() - extract branch device max
+ *                               bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max bpc on success or 0 if max bpc not defined
+ */
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                             const u8 port_cap[4])
+{
+       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+               DP_DETAILED_CAP_INFO_AVAILABLE;
+       int bpc;
+
+       if (!detailed_cap_info)
+               return 0;
+
+       switch (type) {
+       case DP_DS_PORT_TYPE_VGA:
+       case DP_DS_PORT_TYPE_DVI:
+       case DP_DS_PORT_TYPE_HDMI:
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+
+               switch (bpc) {
+               case DP_DS_8BPC:
+                       return 8;
+               case DP_DS_10BPC:
+                       return 10;
+               case DP_DS_12BPC:
+                       return 12;
+               case DP_DS_16BPC:
+                       return 16;
+               }
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
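As a worked example of the DPCD encoding these two helpers decode: for DVI, HDMI and DP++ downstream ports, port_cap[1] is in units of 2.5 MHz, so a raw value of 120 yields 120 * 2500 = 300000 kHz (a 300 MHz TMDS limit), while for a VGA converter the unit is 8 MHz of pixel rate. A hedged usage sketch, assuming dpcd[] and port_cap[] were already read over AUX:

    /* Sketch: prune modes against branch-device limits (names of the
     * surrounding mode variables are illustrative). */
    int max_clk = drm_dp_downstream_max_clock(dpcd, port_cap); /* kHz, 0 = unknown */
    int max_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);   /* 8..16, 0 = unknown */

    if (max_clk && mode->clock > max_clk)
            /* reject or filter this mode */;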
+
+/**
+ * drm_dp_downstream_id() - identify branch device
+ * @aux: DisplayPort AUX channel
+ * @id: DisplayPort branch device id
+ *
+ * Returns the number of bytes read on success, or a negative error code
+ * on failure
+ */
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
+{
+       return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+}
+EXPORT_SYMBOL(drm_dp_downstream_id);
+
+/**
+ * drm_dp_downstream_debug() - debug DP branch devices
+ * @m: pointer for debugfs file
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_downstream_debug(struct seq_file *m,
+                            const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                            const u8 port_cap[4], struct drm_dp_aux *aux)
+{
+       bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+                                DP_DETAILED_CAP_INFO_AVAILABLE;
+       int clk;
+       int bpc;
+       char id[6];
+       int len;
+       uint8_t rev[2];
+       int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+       bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+                            DP_DWN_STRM_PORT_PRESENT;
+
+       seq_printf(m, "\tDP branch device present: %s\n",
+                  branch_device ? "yes" : "no");
+
+       if (!branch_device)
+               return;
+
+       switch (type) {
+       case DP_DS_PORT_TYPE_DP:
+               seq_puts(m, "\t\tType: DisplayPort\n");
+               break;
+       case DP_DS_PORT_TYPE_VGA:
+               seq_puts(m, "\t\tType: VGA\n");
+               break;
+       case DP_DS_PORT_TYPE_DVI:
+               seq_puts(m, "\t\tType: DVI\n");
+               break;
+       case DP_DS_PORT_TYPE_HDMI:
+               seq_puts(m, "\t\tType: HDMI\n");
+               break;
+       case DP_DS_PORT_TYPE_NON_EDID:
+               seq_puts(m, "\t\tType: others without EDID support\n");
+               break;
+       case DP_DS_PORT_TYPE_DP_DUALMODE:
+               seq_puts(m, "\t\tType: DP++\n");
+               break;
+       case DP_DS_PORT_TYPE_WIRELESS:
+               seq_puts(m, "\t\tType: Wireless\n");
+               break;
+       default:
+               seq_puts(m, "\t\tType: N/A\n");
+       }
+
+       drm_dp_downstream_id(aux, id);
+       seq_printf(m, "\t\tID: %s\n", id);
+
+       len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+       if (len > 0)
+               seq_printf(m, "\t\tHW: %d.%d\n",
+                          (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
+
+       len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+       if (len > 0)
+               seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
+
+       if (detailed_cap_info) {
+               clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+
+               if (clk > 0) {
+                       if (type == DP_DS_PORT_TYPE_VGA)
+                               seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
+                       else
+                               seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+               }
+
+               bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+
+               if (bpc > 0)
+                       seq_printf(m, "\t\tMax bpc: %d\n", bpc);
+       }
+}
+EXPORT_SYMBOL(drm_dp_downstream_debug);
+
 /*
  * I2C-over-AUX implementation
  */
index acf6a5f..80c7f25 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/mount.h>
 #include <linux/slab.h>
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 #include "drm_crtc_internal.h"
 #include "drm_legacy.h"
 #include "drm_internal.h"
@@ -46,8 +45,8 @@
 unsigned int drm_debug = 0;
 EXPORT_SYMBOL(drm_debug);
 
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
+MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
+MODULE_DESCRIPTION("DRM shared core routines");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
 "\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
@@ -339,6 +338,9 @@ void drm_minor_release(struct drm_minor *minor)
 
 static int drm_dev_set_unique(struct drm_device *dev, const char *name)
 {
+       if (!name)
+               return -EINVAL;
+
        kfree(dev->unique);
        dev->unique = kstrdup(name, GFP_KERNEL);
 
@@ -589,7 +591,7 @@ EXPORT_SYMBOL(drm_dev_init);
  * own struct should look at using drm_dev_init() instead.
  *
  * RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
+ * Pointer to new DRM device, or ERR_PTR on failure.
  */
 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent)
@@ -599,12 +601,12 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ret = drm_dev_init(dev, driver, parent);
        if (ret) {
                kfree(dev);
-               return NULL;
+               return ERR_PTR(ret);
        }
 
        return dev;
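Since drm_dev_alloc() now distinguishes failure reasons, callers switch from a NULL check to the ERR_PTR convention, as the drm_get_pci_dev() hunk later in this pull does:

    /* Sketch of the updated caller pattern. */
    dev = drm_dev_alloc(driver, parent);
    if (IS_ERR(dev))
            return PTR_ERR(dev);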
@@ -821,53 +823,48 @@ static const struct file_operations drm_stub_fops = {
        .llseek = noop_llseek,
 };
 
+static void drm_core_exit(void)
+{
+       unregister_chrdev(DRM_MAJOR, "drm");
+       debugfs_remove(drm_debugfs_root);
+       drm_sysfs_destroy();
+       idr_destroy(&drm_minors_idr);
+       drm_connector_ida_destroy();
+       drm_global_release();
+}
+
 static int __init drm_core_init(void)
 {
-       int ret = -ENOMEM;
+       int ret;
 
        drm_global_init();
        drm_connector_ida_init();
        idr_init(&drm_minors_idr);
 
-       if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
-               goto err_p1;
-
        ret = drm_sysfs_init();
        if (ret < 0) {
-               printk(KERN_ERR "DRM: Error creating drm class.\n");
-               goto err_p2;
+               DRM_ERROR("Cannot create DRM class: %d\n", ret);
+               goto error;
        }
 
        drm_debugfs_root = debugfs_create_dir("dri", NULL);
        if (!drm_debugfs_root) {
-               DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
-               ret = -1;
-               goto err_p3;
+               ret = -ENOMEM;
+               DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
+               goto error;
        }
 
-       DRM_INFO("Initialized %s %d.%d.%d %s\n",
-                CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+       ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
+       if (ret < 0)
+               goto error;
+
+       DRM_INFO("Initialized\n");
        return 0;
-err_p3:
-       drm_sysfs_destroy();
-err_p2:
-       unregister_chrdev(DRM_MAJOR, "drm");
 
-       idr_destroy(&drm_minors_idr);
-err_p1:
+error:
+       drm_core_exit();
        return ret;
 }
 
-static void __exit drm_core_exit(void)
-{
-       debugfs_remove(drm_debugfs_root);
-       drm_sysfs_destroy();
-
-       unregister_chrdev(DRM_MAJOR, "drm");
-
-       drm_connector_ida_destroy();
-       idr_destroy(&drm_minors_idr);
-}
-
 module_init(drm_core_init);
 module_exit(drm_core_exit);
index 998a674..5c06771 100644 (file)
  *
  * Encoders represent the connecting element between the CRTC (as the overall
  * pixel pipeline, represented by struct &drm_crtc) and the connectors (as the
- * generic sink entity, represented by struct &drm_connector). Encoders are
- * objects exposed to userspace, originally to allow userspace to infer cloning
- * and connector/CRTC restrictions. Unfortunately almost all drivers get this
- * wrong, making the uabi pretty much useless. On top of that the exposed
- * restrictions are too simple for todays hardware, and the recommend way to
- * infer restrictions is by using the DRM_MODE_ATOMIC_TEST_ONLY flag for the
- * atomic IOCTL.
+ * generic sink entity, represented by struct &drm_connector). An encoder takes
+ * pixel data from a CRTC and converts it to a format suitable for any attached
+ * connector. Encoders are objects exposed to userspace, originally to allow
+ * userspace to infer cloning and connector/CRTC restrictions. Unfortunately
+ * almost all drivers get this wrong, making the uabi pretty much useless. On
+ * top of that the exposed restrictions are too simple for today's hardware, and
+ * the recommended way to infer restrictions is by using the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag for the atomic IOCTL.
  *
  * Otherwise encoders aren't used in the uapi at all (any modeset request from
  * userspace directly connects a connector with a CRTC), drivers are therefore
  * free to use them however they wish. Modeset helper libraries make strong use
  * of encoders to facilitate code sharing. But for more complex settings it is
  * usually better to move shared code into a separate &drm_bridge. Compared to
- * encoders bridges also have the benefit of not being purely an internal
+ * encoders, bridges also have the benefit of being purely an internal
  * abstraction since they are not exposed to userspace at all.
  *
  * Encoders are initialized with drm_encoder_init() and cleaned up using
index c4d3500..5e83028 100644 (file)
@@ -41,6 +41,8 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 
+#include "drm_crtc_helper_internal.h"
+
 static bool drm_fbdev_emulation = true;
 module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
 MODULE_PARM_DESC(fbdev_emulation,
index 036cd27..e84faec 100644 (file)
@@ -199,7 +199,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 
        filp->private_data = priv;
        priv->filp = filp;
-       priv->uid = current_euid();
        priv->pid = get_pid(task_pid(current));
        priv->minor = minor;
 
index 30dc01e..398efd6 100644 (file)
  * &drm_framebuffer.
  */
 
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+                                    uint32_t src_w, uint32_t src_h,
+                                    const struct drm_framebuffer *fb)
+{
+       unsigned int fb_width, fb_height;
+
+       fb_width = fb->width << 16;
+       fb_height = fb->height << 16;
+
+       /* Make sure source coordinates are inside the fb. */
+       if (src_w > fb_width ||
+           src_x > fb_width - src_w ||
+           src_h > fb_height ||
+           src_y > fb_height - src_h) {
+               DRM_DEBUG_KMS("Invalid source coordinates "
+                             "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+                             src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+                             src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+                             src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+                             src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
+               return -ENOSPC;
+       }
+
+       return 0;
+}
+
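The source rectangle here is in 16.16 fixed point, so the framebuffer bounds are shifted left by 16 before comparing. The debug message converts the fractional 16 bits to millionths with (frac * 15625) >> 10, which is exact because 10^6 / 2^16 = 15625 / 1024. A minimal sketch of a rectangle that passes the check on a 1920x1080 framebuffer (fb assumed looked up earlier):

    /* Sketch: full-fb source rectangle in 16.16 fixed point. */
    uint32_t src_x = 0, src_y = 0;
    uint32_t src_w = 1920 << 16;  /* 1920.000000 */
    uint32_t src_h = 1080 << 16;  /* 1080.000000 */

    ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);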
 /**
  * drm_mode_addfb - add an FB to the graphics configuration
  * @dev: drm device for the ioctl
index 9134ae1..465bacd 100644 (file)
@@ -257,7 +257,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
-       drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+       drm_vma_node_revoke(&obj->vma_node, file_priv);
 
        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);
@@ -372,7 +372,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 
        handle = ret;
 
-       ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+       ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;
 
@@ -386,7 +386,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
        return 0;
 
 err_revoke:
-       drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+       drm_vma_node_revoke(&obj->vma_node, file_priv);
 err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
@@ -991,7 +991,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        if (!obj)
                return -EINVAL;
 
-       if (!drm_vma_node_is_allowed(node, filp)) {
+       if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_unreference_unlocked(obj);
                return -EACCES;
        }
index 3d2e91c..b404287 100644 (file)
@@ -65,30 +65,34 @@ void drm_global_release(void)
 
 int drm_global_item_ref(struct drm_global_reference *ref)
 {
-       int ret;
+       int ret = 0;
        struct drm_global_item *item = &glob[ref->global_type];
 
        mutex_lock(&item->mutex);
        if (item->refcount == 0) {
-               item->object = kzalloc(ref->size, GFP_KERNEL);
-               if (unlikely(item->object == NULL)) {
+               ref->object = kzalloc(ref->size, GFP_KERNEL);
+               if (unlikely(ref->object == NULL)) {
                        ret = -ENOMEM;
-                       goto out_err;
+                       goto error_unlock;
                }
-
-               ref->object = item->object;
                ret = ref->init(ref);
                if (unlikely(ret != 0))
-                       goto out_err;
+                       goto error_free;
 
+               item->object = ref->object;
+       } else {
+               ref->object = item->object;
        }
+
        ++item->refcount;
-       ref->object = item->object;
        mutex_unlock(&item->mutex);
        return 0;
-out_err:
+
+error_free:
+       kfree(ref->object);
+       ref->object = NULL;
+error_unlock:
        mutex_unlock(&item->mutex);
-       item->object = NULL;
        return ret;
 }
 EXPORT_SYMBOL(drm_global_item_ref);
index 9ae353f..1df2d33 100644 (file)
@@ -80,6 +80,7 @@ int drm_clients_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *priv;
+       kuid_t uid;
 
        seq_printf(m,
                   "%20s %5s %3s master a %5s %10s\n",
@@ -98,13 +99,14 @@ int drm_clients_info(struct seq_file *m, void *data)
 
                rcu_read_lock(); /* locks pid_task()->comm */
                task = pid_task(priv->pid, PIDTYPE_PID);
+               uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
                seq_printf(m, "%20s %5d %3d   %c    %c %5d %10u\n",
                           task ? task->comm : "<unknown>",
                           pid_vnr(priv->pid),
                           priv->minor->index,
                           drm_is_current_master(priv) ? 'y' : 'n',
                           priv->authenticated ? 'y' : 'n',
-                          from_kuid_munged(seq_user_ns(m), priv->uid),
+                          from_kuid_munged(seq_user_ns(m), uid),
                           priv->magic);
                rcu_read_unlock();
        }
index b86dc9b..e66af28 100644 (file)
@@ -21,6 +21,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
 /* drm_irq.c */
 extern unsigned int drm_timestamp_monotonic;
 
index 32a489b..867ab8c 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/export.h>
 
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 
 #define DRM_IOCTL_VERSION32            DRM_IOWR(0x00, drm_version32_t)
 #define DRM_IOCTL_GET_UNIQUE32         DRM_IOWR(0x01, drm_unique32_t)
@@ -1016,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        return 0;
 }
 
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
 typedef struct drm_mode_fb_cmd232 {
        u32 fb_id;
        u32 width;
@@ -1072,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
 
        return 0;
 }
+#endif
 
 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
@@ -1105,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 #endif
        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
        [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+#endif
 };
 
 /**
index b97c421..0ad2c47 100644 (file)
@@ -29,7 +29,6 @@
  */
 
 #include <drm/drmP.h>
-#include <drm/drm_core.h>
 #include <drm/drm_auth.h>
 #include "drm_legacy.h"
 #include "drm_internal.h"
index 6edda83..9f17085 100644 (file)
@@ -372,14 +372,25 @@ out:
        return ret;
 }
 
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+                                              uint32_t prop_id)
+{
+       int i;
+
+       for (i = 0; i < obj->properties->count; i++)
+               if (obj->properties->properties[i]->base.id == prop_id)
+                       return obj->properties->properties[i];
+
+       return NULL;
+}
+
 int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
 {
        struct drm_mode_obj_set_property *arg = data;
        struct drm_mode_object *arg_obj;
-       struct drm_mode_object *prop_obj;
        struct drm_property *property;
-       int i, ret = -EINVAL;
+       int ret = -EINVAL;
        struct drm_mode_object *ref;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -392,23 +403,13 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
                ret = -ENOENT;
                goto out;
        }
-       if (!arg_obj->properties)
-               goto out_unref;
-
-       for (i = 0; i < arg_obj->properties->count; i++)
-               if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
-                       break;
 
-       if (i == arg_obj->properties->count)
+       if (!arg_obj->properties)
                goto out_unref;
 
-       prop_obj = drm_mode_object_find(dev, arg->prop_id,
-                                       DRM_MODE_OBJECT_PROPERTY);
-       if (!prop_obj) {
-               ret = -ENOENT;
+       property = drm_mode_obj_find_prop_id(arg_obj, arg->prop_id);
+       if (!property)
                goto out_unref;
-       }
-       property = obj_to_property(prop_obj);
 
        if (!drm_property_change_valid_get(property, arg->value, &ref))
                goto out_unref;
index d86362f..3ceea9c 100644 (file)
@@ -236,8 +236,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
        DRM_DEBUG("\n");
 
        dev = drm_dev_alloc(driver, &pdev->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
 
        ret = pci_enable_device(pdev);
        if (ret)
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
new file mode 100644 (file)
index 0000000..cd0d475
--- /dev/null
@@ -0,0 +1,907 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_plane.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * A plane represents an image source that can be blended with or overlaid on
+ * top of a CRTC during the scanout process. Planes take their input data from a
+ * &drm_framebuffer object. The plane itself specifies the cropping and scaling
+ * of that image, and where it is placed on the visible area of a display
+ * pipeline, represented by &drm_crtc. A plane can also have additional
+ * properties that specify how the pixels are positioned and blended, like
+ * rotation or Z-position. All these properties are stored in &drm_plane_state.
+ *
+ * To create a plane, a KMS driver allocates and zeroes an instance of
+ * struct &drm_plane (possibly as part of a larger structure) and registers it
+ * with a call to drm_universal_plane_init().
+ *
+ * Cursor and overlay planes are optional. All drivers should provide one
+ * primary plane per CRTC to avoid surprising userspace too much. See enum
+ * &drm_plane_type for a more in-depth discussion of these special uapi-relevant
+ * plane types. Special planes are associated with their CRTC by calling
+ * drm_crtc_init_with_planes().
+ *
+ * The type of a plane is exposed in the immutable "type" enumeration property,
+ * which has one of the following values: "Overlay", "Primary", "Cursor".
+ */
+
+static unsigned int drm_num_planes(struct drm_device *dev)
+{
+       unsigned int num = 0;
+       struct drm_plane *tmp;
+
+       drm_for_each_plane(tmp, dev) {
+               num++;
+       }
+
+       return num;
+}
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                            unsigned long possible_crtcs,
+                            const struct drm_plane_funcs *funcs,
+                            const uint32_t *formats, unsigned int format_count,
+                            enum drm_plane_type type,
+                            const char *name, ...)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       int ret;
+
+       ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+       if (ret)
+               return ret;
+
+       drm_modeset_lock_init(&plane->mutex);
+
+       plane->base.properties = &plane->properties;
+       plane->dev = dev;
+       plane->funcs = funcs;
+       plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
+                                           GFP_KERNEL);
+       if (!plane->format_types) {
+               DRM_DEBUG_KMS("out of memory when allocating plane\n");
+               drm_mode_object_unregister(dev, &plane->base);
+               return -ENOMEM;
+       }
+
+       if (name) {
+               va_list ap;
+
+               va_start(ap, name);
+               plane->name = kvasprintf(GFP_KERNEL, name, ap);
+               va_end(ap);
+       } else {
+               plane->name = kasprintf(GFP_KERNEL, "plane-%d",
+                                       drm_num_planes(dev));
+       }
+       if (!plane->name) {
+               kfree(plane->format_types);
+               drm_mode_object_unregister(dev, &plane->base);
+               return -ENOMEM;
+       }
+
+       memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+       plane->format_count = format_count;
+       plane->possible_crtcs = possible_crtcs;
+       plane->type = type;
+
+       list_add_tail(&plane->head, &config->plane_list);
+       plane->index = config->num_total_plane++;
+       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+               config->num_overlay_plane++;
+
+       drm_object_attach_property(&plane->base,
+                                  config->plane_type_property,
+                                  plane->type);
+
+       if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+               drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
+               drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_x, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_y, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_w, 0);
+               drm_object_attach_property(&plane->base, config->prop_src_h, 0);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_universal_plane_init);
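A hedged sketch of how a driver would use this, with the embedding structure, funcs table and format list hypothetical:

    /* Sketch: register a primary plane for one CRTC. */
    static const uint32_t my_formats[] = {
            DRM_FORMAT_XRGB8888,
            DRM_FORMAT_ARGB8888,
    };

    ret = drm_universal_plane_init(dev, &mydev->primary,
                                   1 << crtc_index, &my_plane_funcs,
                                   my_formats, ARRAY_SIZE(my_formats),
                                   DRM_PLANE_TYPE_PRIMARY,
                                   "primary-%d", crtc_index);
    if (ret)
            return ret;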
+
+int drm_plane_register_all(struct drm_device *dev)
+{
+       struct drm_plane *plane;
+       int ret = 0;
+
+       drm_for_each_plane(plane, dev) {
+               if (plane->funcs->late_register)
+                       ret = plane->funcs->late_register(plane);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+void drm_plane_unregister_all(struct drm_device *dev)
+{
+       struct drm_plane *plane;
+
+       drm_for_each_plane(plane, dev) {
+               if (plane->funcs->early_unregister)
+                       plane->funcs->early_unregister(plane);
+       }
+}
+
+/**
+ * drm_plane_init - Initialize a legacy plane
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @is_primary: plane type (primary vs overlay)
+ *
+ * Legacy API to initialize a DRM plane.
+ *
+ * New drivers should call drm_universal_plane_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+                  unsigned long possible_crtcs,
+                  const struct drm_plane_funcs *funcs,
+                  const uint32_t *formats, unsigned int format_count,
+                  bool is_primary)
+{
+       enum drm_plane_type type;
+
+       type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+       return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+                                       formats, format_count, type, NULL);
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+       struct drm_device *dev = plane->dev;
+
+       drm_modeset_lock_all(dev);
+       kfree(plane->format_types);
+       drm_mode_object_unregister(dev, &plane->base);
+
+       BUG_ON(list_empty(&plane->head));
+
+       /* Note that the plane_list is considered to be static; should we
+        * remove the drm_plane at runtime we would have to decrement all
+        * the indices on the drm_plane after us in the plane_list.
+        */
+
+       list_del(&plane->head);
+       dev->mode_config.num_total_plane--;
+       if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+               dev->mode_config.num_overlay_plane--;
+       drm_modeset_unlock_all(dev);
+
+       WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
+       if (plane->state && plane->funcs->atomic_destroy_state)
+               plane->funcs->atomic_destroy_state(plane, plane->state);
+
+       kfree(plane->name);
+
+       memset(plane, 0, sizeof(*plane));
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
+/**
+ * drm_plane_from_index - find the registered plane at an index
+ * @dev: DRM device
+ * @idx: index of the registered plane to find
+ *
+ * Given a plane index, return the registered plane from DRM device's
+ * list of planes with matching index.
+ */
+struct drm_plane *
+drm_plane_from_index(struct drm_device *dev, int idx)
+{
+       struct drm_plane *plane;
+
+       drm_for_each_plane(plane, dev)
+               if (idx == plane->index)
+                       return plane;
+
+       return NULL;
+}
+EXPORT_SYMBOL(drm_plane_from_index);
+
+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+       int ret;
+
+       if (!plane->fb)
+               return;
+
+       plane->old_fb = plane->fb;
+       ret = plane->funcs->disable_plane(plane);
+       if (ret) {
+               DRM_ERROR("failed to disable plane with busy fb\n");
+               plane->old_fb = NULL;
+               return;
+       }
+       /* disconnect the plane from the fb and crtc: */
+       drm_framebuffer_unreference(plane->old_fb);
+       plane->old_fb = NULL;
+       plane->fb = NULL;
+       plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
+/**
+ * drm_mode_plane_set_obj_prop - set the value of a property
+ * @plane: drm plane object to set property value for
+ * @property: property to set
+ * @value: value the property should be set to
+ *
+ * This functions sets a given property on a given plane object. This function
+ * calls the driver's ->set_property callback and changes the software state of
+ * the property if the callback succeeds.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+                               struct drm_property *property,
+                               uint64_t value)
+{
+       int ret = -EINVAL;
+       struct drm_mode_object *obj = &plane->base;
+
+       if (plane->funcs->set_property)
+               ret = plane->funcs->set_property(plane, property, value);
+       if (!ret)
+               drm_object_property_set_value(obj, property, value);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
+
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane_res *plane_resp = data;
+       struct drm_mode_config *config;
+       struct drm_plane *plane;
+       uint32_t __user *plane_ptr;
+       int copied = 0;
+       unsigned num_planes;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       config = &dev->mode_config;
+
+       if (file_priv->universal_planes)
+               num_planes = config->num_total_plane;
+       else
+               num_planes = config->num_overlay_plane;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (num_planes &&
+           (plane_resp->count_planes >= num_planes)) {
+               plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+               /* Plane lists are invariant, no locking needed. */
+               drm_for_each_plane(plane, dev) {
+                       /*
+                        * Unless userspace set the 'universal planes'
+                        * capability bit, only advertise overlays.
+                        */
+                       if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+                           !file_priv->universal_planes)
+                               continue;
+
+                       if (put_user(plane->base.id, plane_ptr + copied))
+                               return -EFAULT;
+                       copied++;
+               }
+       }
+       plane_resp->count_planes = num_planes;
+
+       return 0;
+}
+
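From userspace, libdrm hides the two-call dance: drmModeGetPlaneResources() issues the ioctl once for the count, allocates, then again for the IDs. A minimal sketch (fd assumed open on a DRM node):

    /* Sketch: enumerate planes, opting in to universal planes first. */
    drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);

    drmModePlaneRes *pres = drmModeGetPlaneResources(fd);
    for (uint32_t i = 0; pres && i < pres->count_planes; i++)
            printf("plane %u\n", pres->planes[i]);
    drmModeFreePlaneResources(pres);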
+int drm_mode_getplane(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_mode_get_plane *plane_resp = data;
+       struct drm_plane *plane;
+       uint32_t __user *format_ptr;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       plane = drm_plane_find(dev, plane_resp->plane_id);
+       if (!plane)
+               return -ENOENT;
+
+       drm_modeset_lock(&plane->mutex, NULL);
+       if (plane->crtc)
+               plane_resp->crtc_id = plane->crtc->base.id;
+       else
+               plane_resp->crtc_id = 0;
+
+       if (plane->fb)
+               plane_resp->fb_id = plane->fb->base.id;
+       else
+               plane_resp->fb_id = 0;
+       drm_modeset_unlock(&plane->mutex);
+
+       plane_resp->plane_id = plane->base.id;
+       plane_resp->possible_crtcs = plane->possible_crtcs;
+       plane_resp->gamma_size = 0;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if (plane->format_count &&
+           (plane_resp->count_format_types >= plane->format_count)) {
+               format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+               if (copy_to_user(format_ptr,
+                                plane->format_types,
+                                sizeof(uint32_t) * plane->format_count)) {
+                       return -EFAULT;
+               }
+       }
+       plane_resp->count_format_types = plane->format_count;
+
+       return 0;
+}
+
+int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+{
+       unsigned int i;
+
+       for (i = 0; i < plane->format_count; i++) {
+               if (format == plane->format_types[i])
+                       return 0;
+       }
+
+       return -EINVAL;
+}
+
+/*
+ * setplane_internal - setplane handler for internal callers
+ *
+ * Note that we assume an extra reference has already been taken on fb.  If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
+ *
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
+ */
+static int __setplane_internal(struct drm_plane *plane,
+                              struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb,
+                              int32_t crtc_x, int32_t crtc_y,
+                              uint32_t crtc_w, uint32_t crtc_h,
+                              /* src_{x,y,w,h} values are 16.16 fixed point */
+                              uint32_t src_x, uint32_t src_y,
+                              uint32_t src_w, uint32_t src_h)
+{
+       int ret = 0;
+
+       /* No fb means shut it down */
+       if (!fb) {
+               plane->old_fb = plane->fb;
+               ret = plane->funcs->disable_plane(plane);
+               if (!ret) {
+                       plane->crtc = NULL;
+                       plane->fb = NULL;
+               } else {
+                       plane->old_fb = NULL;
+               }
+               goto out;
+       }
+
+       /* Check whether this plane is usable on this CRTC */
+       if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+               DRM_DEBUG_KMS("Invalid crtc for plane\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Check whether this plane supports the fb pixel format. */
+       ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+       if (ret) {
+               char *format_name = drm_get_format_name(fb->pixel_format);
+               DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+               kfree(format_name);
+               goto out;
+       }
+
+       /* Give drivers some help against integer overflows */
+       if (crtc_w > INT_MAX ||
+           crtc_x > INT_MAX - (int32_t) crtc_w ||
+           crtc_h > INT_MAX ||
+           crtc_y > INT_MAX - (int32_t) crtc_h) {
+               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                             crtc_w, crtc_h, crtc_x, crtc_y);
+               ret = -ERANGE;
+               goto out;
+       }
+
+       ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+       if (ret)
+               goto out;
+
+       plane->old_fb = plane->fb;
+       ret = plane->funcs->update_plane(plane, crtc, fb,
+                                        crtc_x, crtc_y, crtc_w, crtc_h,
+                                        src_x, src_y, src_w, src_h);
+       if (!ret) {
+               plane->crtc = crtc;
+               plane->fb = fb;
+               fb = NULL;
+       } else {
+               plane->old_fb = NULL;
+       }
+
+out:
+       if (fb)
+               drm_framebuffer_unreference(fb);
+       if (plane->old_fb)
+               drm_framebuffer_unreference(plane->old_fb);
+       plane->old_fb = NULL;
+
+       return ret;
+}
+
+static int setplane_internal(struct drm_plane *plane,
+                            struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            int32_t crtc_x, int32_t crtc_y,
+                            uint32_t crtc_w, uint32_t crtc_h,
+                            /* src_{x,y,w,h} values are 16.16 fixed point */
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h)
+{
+       int ret;
+
+       drm_modeset_lock_all(plane->dev);
+       ret = __setplane_internal(plane, crtc, fb,
+                                 crtc_x, crtc_y, crtc_w, crtc_h,
+                                 src_x, src_y, src_w, src_h);
+       drm_modeset_unlock_all(plane->dev);
+
+       return ret;
+}
+
+int drm_mode_setplane(struct drm_device *dev, void *data,
+                     struct drm_file *file_priv)
+{
+       struct drm_mode_set_plane *plane_req = data;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc = NULL;
+       struct drm_framebuffer *fb = NULL;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       /*
+        * First, find the plane, crtc, and fb objects.  If not available,
+        * we don't bother to call the driver.
+        */
+       plane = drm_plane_find(dev, plane_req->plane_id);
+       if (!plane) {
+               DRM_DEBUG_KMS("Unknown plane ID %d\n",
+                             plane_req->plane_id);
+               return -ENOENT;
+       }
+
+       if (plane_req->fb_id) {
+               fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+               if (!fb) {
+                       DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+                                     plane_req->fb_id);
+                       return -ENOENT;
+               }
+
+               crtc = drm_crtc_find(dev, plane_req->crtc_id);
+               if (!crtc) {
+                       DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+                                     plane_req->crtc_id);
+                       return -ENOENT;
+               }
+       }
+
+       /*
+        * setplane_internal will take care of deref'ing either the old or new
+        * framebuffer depending on success.
+        */
+       return setplane_internal(plane, crtc, fb,
+                                plane_req->crtc_x, plane_req->crtc_y,
+                                plane_req->crtc_w, plane_req->crtc_h,
+                                plane_req->src_x, plane_req->src_y,
+                                plane_req->src_w, plane_req->src_h);
+}
+
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+                                    struct drm_mode_cursor2 *req,
+                                    struct drm_file *file_priv)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_mode_fb_cmd2 fbreq = {
+               .width = req->width,
+               .height = req->height,
+               .pixel_format = DRM_FORMAT_ARGB8888,
+               .pitches = { req->width * 4 },
+               .handles = { req->handle },
+       };
+       int32_t crtc_x, crtc_y;
+       uint32_t crtc_w = 0, crtc_h = 0;
+       uint32_t src_w = 0, src_h = 0;
+       int ret = 0;
+
+       BUG_ON(!crtc->cursor);
+       WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
+
+       /*
+        * Obtain fb we'll be using (either new or existing) and take an extra
+        * reference to it if fb != null.  setplane will take care of dropping
+        * the reference if the plane update fails.
+        */
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (req->handle) {
+                       fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv);
+                       if (IS_ERR(fb)) {
+                               DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+                               return PTR_ERR(fb);
+                       }
+                       fb->hot_x = req->hot_x;
+                       fb->hot_y = req->hot_y;
+               } else {
+                       fb = NULL;
+               }
+       } else {
+               fb = crtc->cursor->fb;
+               if (fb)
+                       drm_framebuffer_reference(fb);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               crtc_x = req->x;
+               crtc_y = req->y;
+       } else {
+               crtc_x = crtc->cursor_x;
+               crtc_y = crtc->cursor_y;
+       }
+
+       if (fb) {
+               crtc_w = fb->width;
+               crtc_h = fb->height;
+               src_w = fb->width << 16;
+               src_h = fb->height << 16;
+       }
+
+       /*
+        * setplane_internal will take care of deref'ing either the old or new
+        * framebuffer depending on success.
+        */
+       ret = __setplane_internal(crtc->cursor, crtc, fb,
+                               crtc_x, crtc_y, crtc_w, crtc_h,
+                               0, 0, src_w, src_h);
+
+       /* Update successful; save new cursor position, if necessary */
+       if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+               crtc->cursor_x = req->x;
+               crtc->cursor_y = req->y;
+       }
+
+       return ret;
+}
+
+static int drm_mode_cursor_common(struct drm_device *dev,
+                                 struct drm_mode_cursor2 *req,
+                                 struct drm_file *file_priv)
+{
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+               return -EINVAL;
+
+       crtc = drm_crtc_find(dev, req->crtc_id);
+       if (!crtc) {
+               DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+               return -ENOENT;
+       }
+
+       /*
+        * If this crtc has a universal cursor plane, call that plane's update
+        * handler rather than using legacy cursor handlers.
+        */
+       drm_modeset_lock_crtc(crtc, crtc->cursor);
+       if (crtc->cursor) {
+               ret = drm_mode_cursor_universal(crtc, req, file_priv);
+               goto out;
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
+                       ret = -ENXIO;
+                       goto out;
+               }
+               /* Turns off the cursor if handle is 0 */
+               if (crtc->funcs->cursor_set2)
+                       ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+                                                     req->width, req->height, req->hot_x, req->hot_y);
+               else
+                       ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+                                                     req->width, req->height);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               if (crtc->funcs->cursor_move) {
+                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+               } else {
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+out:
+       drm_modeset_unlock_crtc(crtc);
+
+       return ret;
+}
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor *req = data;
+       struct drm_mode_cursor2 new_req;
+
+       memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+       new_req.hot_x = new_req.hot_y = 0;
+
+       return drm_mode_cursor_common(dev, &new_req, file_priv);
+}
+
+/*
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
+ */
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor2 *req = data;
+
+       return drm_mode_cursor_common(dev, req, file_priv);
+}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_page_flip_target *page_flip = data;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_pending_vblank_event *e = NULL;
+       u32 target_vblank = page_flip->sequence;
+       int ret = -EINVAL;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
+               return -EINVAL;
+
+       if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET))
+               return -EINVAL;
+
+       /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags
+        * can be specified
+        */
+       if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET)
+               return -EINVAL;
+
+       if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+               return -EINVAL;
+
+       crtc = drm_crtc_find(dev, page_flip->crtc_id);
+       if (!crtc)
+               return -ENOENT;
+
+       if (crtc->funcs->page_flip_target) {
+               u32 current_vblank;
+               int r;
+
+               r = drm_crtc_vblank_get(crtc);
+               if (r)
+                       return r;
+
+               current_vblank = drm_crtc_vblank_count(crtc);
+
+               switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
+               case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
+                       if ((int)(target_vblank - current_vblank) > 1) {
+                               DRM_DEBUG("Invalid absolute flip target %u, "
+                                         "must be <= %u\n", target_vblank,
+                                         current_vblank + 1);
+                               drm_crtc_vblank_put(crtc);
+                               return -EINVAL;
+                       }
+                       break;
+               case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE:
+                       if (target_vblank != 0 && target_vblank != 1) {
+                               DRM_DEBUG("Invalid relative flip target %u, "
+                                         "must be 0 or 1\n", target_vblank);
+                               drm_crtc_vblank_put(crtc);
+                               return -EINVAL;
+                       }
+                       target_vblank += current_vblank;
+                       break;
+               default:
+                       target_vblank = current_vblank +
+                               !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC);
+                       break;
+               }
+       } else if (crtc->funcs->page_flip == NULL ||
+                  (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) {
+               return -EINVAL;
+       }
+
+       drm_modeset_lock_crtc(crtc, crtc->primary);
+       if (crtc->primary->fb == NULL) {
+               /* The framebuffer is currently unbound, presumably
+                * due to a hotplug event, that userspace has not
+                * yet discovered.
+                */
+               ret = -EBUSY;
+               goto out;
+       }
+
+       fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+       if (!fb) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       if (crtc->state) {
+               const struct drm_plane_state *state = crtc->primary->state;
+
+               ret = drm_framebuffer_check_src_coords(state->src_x,
+                                                      state->src_y,
+                                                      state->src_w,
+                                                      state->src_h,
+                                                      fb);
+       } else {
+               ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+       }
+       if (ret)
+               goto out;
+
+       if (crtc->primary->fb->pixel_format != fb->pixel_format) {
+               DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (!e) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+               e->event.base.length = sizeof(e->event);
+               e->event.user_data = page_flip->user_data;
+               ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+               if (ret) {
+                       kfree(e);
+                       goto out;
+               }
+       }
+
+       crtc->primary->old_fb = crtc->primary->fb;
+       if (crtc->funcs->page_flip_target)
+               ret = crtc->funcs->page_flip_target(crtc, fb, e,
+                                                   page_flip->flags,
+                                                   target_vblank);
+       else
+               ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
+       if (ret) {
+               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+                       drm_event_cancel_free(dev, &e->base);
+               /* Keep the old fb, don't unref it. */
+               crtc->primary->old_fb = NULL;
+       } else {
+               crtc->primary->fb = fb;
+               /* Unref only the old framebuffer. */
+               fb = NULL;
+       }
+
+out:
+       if (ret && crtc->funcs->page_flip_target)
+               drm_crtc_vblank_put(crtc);
+       if (fb)
+               drm_framebuffer_unreference(fb);
+       if (crtc->primary->old_fb)
+               drm_framebuffer_unreference(crtc->primary->old_fb);
+       crtc->primary->old_fb = NULL;
+       drm_modeset_unlock_crtc(crtc);
+
+       return ret;
+}
index 2c819ef..0262698 100644 (file)
@@ -48,8 +48,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
        DRM_DEBUG("\n");
 
        dev = drm_dev_alloc(driver, &platdev->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
 
        dev->platformdev = platdev;
 
index 32dd821..9a37196 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/export.h>
 
 #include <drm/drm_sysfs.h>
-#include <drm/drm_core.h>
 #include <drm/drmP.h>
 #include "drm_internal.h"
 
@@ -37,12 +36,7 @@ static char *drm_devnode(struct device *dev, umode_t *mode)
        return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
 }
 
-static CLASS_ATTR_STRING(version, S_IRUGO,
-               CORE_NAME " "
-               __stringify(CORE_MAJOR) "."
-               __stringify(CORE_MINOR) "."
-               __stringify(CORE_PATCHLEVEL) " "
-               CORE_DATE);
+static CLASS_ATTR_STRING(version, S_IRUGO, "drm 1.1.0 20060810");
 
 /**
  * drm_sysfs_init - initialize sysfs helpers
index 0aef432..20cc33d 100644 (file)
@@ -25,7 +25,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_vma_manager.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
@@ -252,9 +251,9 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
 /**
  * drm_vma_node_allow - Add open-file to list of allowed users
  * @node: Node to modify
- * @filp: Open file to add
+ * @tag: Tag of file to add
  *
- * Add @filp to the list of allowed open-files for this node. If @filp is
+ * Add @tag to the list of allowed open-files for this node. If @tag is
  * already on this list, the ref-count is incremented.
  *
  * The list of allowed-users is preserved across drm_vma_offset_add() and
@@ -269,7 +268,7 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
  * RETURNS:
  * 0 on success, negative error code on internal failure (out-of-mem)
  */
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
 {
        struct rb_node **iter;
        struct rb_node *parent = NULL;
@@ -290,10 +289,10 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
                parent = *iter;
                entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
 
-               if (filp == entry->vm_filp) {
+               if (tag == entry->vm_tag) {
                        entry->vm_count++;
                        goto unlock;
-               } else if (filp > entry->vm_filp) {
+               } else if (tag > entry->vm_tag) {
                        iter = &(*iter)->rb_right;
                } else {
                        iter = &(*iter)->rb_left;
@@ -305,7 +304,7 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
                goto unlock;
        }
 
-       new->vm_filp = filp;
+       new->vm_tag = tag;
        new->vm_count = 1;
        rb_link_node(&new->vm_rb, parent, iter);
        rb_insert_color(&new->vm_rb, &node->vm_files);
@@ -321,17 +320,18 @@ EXPORT_SYMBOL(drm_vma_node_allow);
 /**
  * drm_vma_node_revoke - Remove open-file from list of allowed users
  * @node: Node to modify
- * @filp: Open file to remove
+ * @tag: Tag of file to remove
  *
- * Decrement the ref-count of @filp in the list of allowed open-files on @node.
- * If the ref-count drops to zero, remove @filp from the list. You must call
- * this once for every drm_vma_node_allow() on @filp.
+ * Decrement the ref-count of @tag in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @tag from the list. You must call
+ * this once for every drm_vma_node_allow() on @tag.
  *
  * This is locked against concurrent access internally.
  *
- * If @filp is not on the list, nothing is done.
+ * If @tag is not on the list, nothing is done.
  */
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+                        struct drm_file *tag)
 {
        struct drm_vma_offset_file *entry;
        struct rb_node *iter;
@@ -341,13 +341,13 @@ void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
        iter = node->vm_files.rb_node;
        while (likely(iter)) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-               if (filp == entry->vm_filp) {
+               if (tag == entry->vm_tag) {
                        if (!--entry->vm_count) {
                                rb_erase(&entry->vm_rb, &node->vm_files);
                                kfree(entry);
                        }
                        break;
-               } else if (filp > entry->vm_filp) {
+               } else if (tag > entry->vm_tag) {
                        iter = iter->rb_right;
                } else {
                        iter = iter->rb_left;
@@ -361,9 +361,9 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
 /**
  * drm_vma_node_is_allowed - Check whether an open-file is granted access
  * @node: Node to check
- * @filp: Open-file to check for
+ * @tag: Tag of file to check
  *
- * Search the list in @node whether @filp is currently on the list of allowed
+ * Search the list in @node whether @tag is currently on the list of allowed
  * open-files (see drm_vma_node_allow()).
  *
  * This is locked against concurrent access internally.
@@ -372,7 +372,7 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
- * true iff @filp is on the list
+ * true iff @tag is on the list
  */
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-                            struct file *filp)
+                            struct drm_file *tag)
 {
        struct drm_vma_offset_file *entry;
        struct rb_node *iter;
@@ -382,9 +382,9 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
        iter = node->vm_files.rb_node;
        while (likely(iter)) {
                entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
-               if (filp == entry->vm_filp)
+               if (tag == entry->vm_tag)
                        break;
-               else if (filp > entry->vm_filp)
+               else if (tag > entry->vm_tag)
                        iter = iter->rb_right;
                else
                        iter = iter->rb_left;
index e3164d9..aa68766 100644 (file)
@@ -529,8 +529,8 @@ static int etnaviv_bind(struct device *dev)
        int ret;
 
        drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
index 7882387..0884c45 100644 (file)
@@ -270,7 +270,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        ret = clk_prepare_enable(fsl_dev->pix_clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable pix clk\n");
-               return ret;
+               goto disable_dcu_clk;
        }
 
        fsl_dcu_drm_init_planes(fsl_dev->drm);
@@ -284,6 +284,10 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        enable_irq(fsl_dev->irq);
 
        return 0;
+
+disable_dcu_clk:
+       clk_disable_unprepare(fsl_dev->clk);
+       return ret;
 }
 #endif
 
@@ -330,6 +334,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
        const char *pix_clk_in_name;
        const struct of_device_id *id;
        int ret;
+       u8 div_ratio_shift = 0;
 
        fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
        if (!fsl_dev)
@@ -382,11 +387,14 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
                pix_clk_in = fsl_dev->clk;
        }
 
+       if (of_property_read_bool(dev->of_node, "big-endian"))
+               div_ratio_shift = 24;
+
        pix_clk_in_name = __clk_get_name(pix_clk_in);
        snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
        fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
                        pix_clk_in_name, 0, base + DCU_DIV_RATIO,
-                       0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+                       div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
        if (IS_ERR(fsl_dev->pix_clk)) {
                dev_err(dev, "failed to register pix clk\n");
                ret = PTR_ERR(fsl_dev->pix_clk);
@@ -402,8 +410,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
        fsl_dev->tcon = fsl_tcon_init(dev);
 
        drm = drm_dev_alloc(driver, dev);
-       if (!drm) {
-               ret = -ENOMEM;
+       if (IS_ERR(drm)) {
+               ret = PTR_ERR(drm);
                goto disable_pix_clk;
        }
 
index bca09ea..3194e54 100644 (file)
@@ -57,10 +57,7 @@ static int fsl_tcon_init_regmap(struct device *dev,
 
        tcon->regs = devm_regmap_init_mmio(dev, regs,
                                           &fsl_tcon_regmap_config);
-       if (IS_ERR(tcon->regs))
-               return PTR_ERR(tcon->regs);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(tcon->regs);
 }
 
 struct fsl_tcon *fsl_tcon_init(struct device *dev)
index 1fc2f50..90377a6 100644 (file)
@@ -207,8 +207,8 @@ static int kirin_drm_bind(struct device *dev)
        int ret;
 
        drm_dev = drm_dev_alloc(driver, dev);
-       if (!drm_dev)
-               return -ENOMEM;
+       if (IS_ERR(drm_dev))
+               return PTR_ERR(drm_dev);
 
        drm_dev->platformdev = to_platform_device(dev);
 
index a7da246..a998c2b 100644 (file)
@@ -16,6 +16,7 @@ i915-y := i915_drv.o \
          i915_params.o \
          i915_pci.o \
           i915_suspend.o \
+         i915_sw_fence.o \
          i915_sysfs.o \
          intel_csr.o \
          intel_device_info.o \
index 3c72b3b..70980f8 100644 (file)
@@ -984,7 +984,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 
        src = ERR_PTR(-ENODEV);
        if (src_needs_clflush &&
-           i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, 0, 0)) {
+           i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
                src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
                if (!IS_ERR(src)) {
                        i915_memcpy_from_wc(dst,
index a95d7bc..27b0e34 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+       return to_i915(node->minor->dev);
+}
+
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
 static int
@@ -57,7 +62,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
 
        node->minor = minor;
        node->dent = ent;
-       node->info_ent = (void *) key;
+       node->info_ent = (void *)key;
 
        mutex_lock(&minor->debugfs_lock);
        list_add(&node->list, &minor->debugfs_list);
@@ -68,12 +73,11 @@ drm_add_fake_info_node(struct drm_minor *minor,
 
 static int i915_capabilities(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       const struct intel_device_info *info = INTEL_INFO(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       const struct intel_device_info *info = INTEL_INFO(dev_priv);
 
-       seq_printf(m, "gen: %d\n", info->gen);
-       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+       seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
 #define SEP_SEMICOLON ;
        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
@@ -155,7 +159,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        seq_printf(m, "] %x %s%s%s",
                   i915_gem_active_get_seqno(&obj->last_write,
                                             &obj->base.dev->struct_mutex),
-                  i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+                  i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
@@ -195,7 +199,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        }
 
        engine = i915_gem_active_get_engine(&obj->last_write,
-                                           &obj->base.dev->struct_mutex);
+                                           &dev_priv->drm.struct_mutex);
        if (engine)
                seq_printf(m, " (%s)", engine->name);
 
@@ -221,9 +225,8 @@ static int obj_rank_by_stolen(void *priv,
 
 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        LIST_HEAD(stolen);
@@ -365,29 +368,29 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = &dev_priv->drm;
        struct file_stats stats;
        struct drm_file *file;
 
        memset(&stats, 0, sizeof(stats));
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
+       mutex_lock(&dev->struct_mutex);
        if (dev_priv->kernel_context)
                per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 
-       list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+       list_for_each_entry(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *fpriv = file->driver_priv;
                idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
        }
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        print_file_stats(m, "[k]contexts", stats);
 }
 
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mapped_count, purgeable_count, dpy_count;
        u64 size, mapped_size, purgeable_size, dpy_size;
@@ -497,9 +500,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       bool show_pin_display_only = !!data;
+       struct drm_i915_private *dev_priv = node_to_i915(node);
+       struct drm_device *dev = &dev_priv->drm;
+       bool show_pin_display_only = !!node->info_ent->data;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        int count, ret;
@@ -531,9 +534,8 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        int ret;
 
@@ -580,7 +582,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                   intel_crtc_get_vblank_counter(crtc));
                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
-                       if (INTEL_INFO(dev)->gen >= 4)
+                       if (INTEL_GEN(dev_priv) >= 4)
                                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
                        else
                                addr = I915_READ(DSPADDR(crtc->plane));
@@ -601,9 +603,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 
 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        int total = 0;
@@ -646,9 +647,8 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_request *req;
        int ret, any;
@@ -713,41 +713,25 @@ static void i915_ring_seqno_info(struct seq_file *m,
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-       intel_runtime_pm_get(dev_priv);
 
        for_each_engine(engine, dev_priv)
                i915_ring_seqno_info(m, engine);
 
-       intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
-
        return 0;
 }
 
 
 static int i915_interrupt_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
-       int ret, i, pipe;
+       int i, pipe;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
        intel_runtime_pm_get(dev_priv);
 
-       if (IS_CHERRYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev_priv)) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));
 
@@ -786,7 +770,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
-       } else if (INTEL_INFO(dev)->gen >= 8) {
+       } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));
 
@@ -842,7 +826,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
-       } else if (IS_VALLEYVIEW(dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
@@ -880,7 +864,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
 
-       } else if (!HAS_PCH_SPLIT(dev)) {
+       } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
@@ -912,7 +896,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                           I915_READ(GTIMR));
        }
        for_each_engine(engine, dev_priv) {
-               if (INTEL_INFO(dev)->gen >= 6) {
+               if (INTEL_GEN(dev_priv) >= 6) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s):       %08x\n",
                                   engine->name, I915_READ_IMR(engine));
@@ -920,16 +904,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                i915_ring_seqno_info(m, engine);
        }
        intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
 
 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int i, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -956,8 +938,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 static int i915_hws_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(node);
        struct intel_engine_cs *engine;
        const u32 *hws;
        int i;
@@ -982,33 +963,25 @@ i915_error_state_write(struct file *filp,
                       loff_t *ppos)
 {
        struct i915_error_state_file_priv *error_priv = filp->private_data;
-       struct drm_device *dev = error_priv->dev;
-       int ret;
 
        DRM_DEBUG_DRIVER("Resetting error state\n");
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       i915_destroy_error_state(dev);
-       mutex_unlock(&dev->struct_mutex);
+       i915_destroy_error_state(error_priv->dev);
 
        return cnt;
 }
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
        struct i915_error_state_file_priv *error_priv;
 
        error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
        if (!error_priv)
                return -ENOMEM;
 
-       error_priv->dev = dev;
+       error_priv->dev = &dev_priv->drm;
 
-       i915_error_state_get(dev, error_priv);
+       i915_error_state_get(&dev_priv->drm, error_priv);
 
        file->private_data = error_priv;
 
@@ -1034,7 +1007,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
        ssize_t ret_count = 0;
        int ret;
 
-       ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
+       ret = i915_error_state_buf_init(&error_str,
+                                       to_i915(error_priv->dev), count, *pos);
        if (ret)
                return ret;
 
@@ -1067,16 +1041,15 @@ static const struct file_operations i915_error_state_fops = {
 static int
 i915_next_seqno_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        int ret;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
        if (ret)
                return ret;
 
        *val = dev_priv->next_seqno;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
        return 0;
 }
@@ -1084,7 +1057,8 @@ i915_next_seqno_get(void *data, u64 *val)
 static int
 i915_next_seqno_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1103,14 +1077,13 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int ret = 0;
 
        intel_runtime_pm_get(dev_priv);
 
-       if (IS_GEN5(dev)) {
+       if (IS_GEN5(dev_priv)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1120,7 +1093,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-       } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 freq_sts;
 
                mutex_lock(&dev_priv->rps.hw_lock);
@@ -1147,7 +1120,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
                mutex_unlock(&dev_priv->rps.hw_lock);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
@@ -1159,7 +1132,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                int max_freq;
 
                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-               if (IS_BROXTON(dev)) {
+               if (IS_BROXTON(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
@@ -1175,11 +1148,11 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
                reqf = I915_READ(GEN6_RPNSWREQ);
-               if (IS_GEN9(dev))
+               if (IS_GEN9(dev_priv))
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
-                       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+                       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
@@ -1197,9 +1170,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-               if (IS_GEN9(dev))
+               if (IS_GEN9(dev_priv))
                        cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-               else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1208,7 +1181,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
                mutex_unlock(&dev->struct_mutex);
 
-               if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
@@ -1226,7 +1199,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
-                          (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
+                          (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
@@ -1255,22 +1228,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "Down threshold: %d%%\n",
                           dev_priv->rps.down_threshold);
 
-               max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+               max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
-               max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                             GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
 
                max_freq = (rp_state_cap & 0xff00) >> 8;
-               max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                             GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
 
-               max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+               max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
-               max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+               max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                             GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
@@ -1306,9 +1279,7 @@ out:
 
 static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
@@ -1316,6 +1287,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        enum intel_engine_id id;
        int j;
 
+       if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+               seq_printf(m, "Wedged\n");
+       if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
+               seq_printf(m, "Reset in progress\n");
+       if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
+               seq_printf(m, "Waiter holding struct mutex\n");
+       if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
+               seq_printf(m, "struct_mutex blocked for reset\n");
+
        if (!i915.enable_hangcheck) {
                seq_printf(m, "Hangcheck disabled\n");
                return 0;
@@ -1376,9 +1356,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 static int ironlake_drpc_info(struct seq_file *m)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;
@@ -1444,9 +1423,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 
 static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_uncore_forcewake_domain *fw_domain;
 
        spin_lock_irq(&dev_priv->uncore.lock);
@@ -1462,9 +1439,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 
 static int vlv_drpc_info(struct seq_file *m)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 rpmodectl1, rcctl1, pw_status;
 
        intel_runtime_pm_get(dev_priv);
@@ -1502,9 +1477,8 @@ static int vlv_drpc_info(struct seq_file *m)
 
 static int gen6_drpc_info(struct seq_file *m)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
        unsigned forcewake_count;
@@ -1534,7 +1508,7 @@ static int gen6_drpc_info(struct seq_file *m)
 
        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
-       if (INTEL_INFO(dev)->gen >= 9) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
                gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
        }
@@ -1556,7 +1530,7 @@ static int gen6_drpc_info(struct seq_file *m)
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
-       if (INTEL_INFO(dev)->gen >= 9) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Well Gating Enabled: %s\n",
                        yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
                seq_printf(m, "Media Well Gating Enabled: %s\n",
@@ -1590,7 +1564,7 @@ static int gen6_drpc_info(struct seq_file *m)
 
        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
-       if (INTEL_INFO(dev)->gen >= 9) {
+       if (INTEL_GEN(dev_priv) >= 9) {
                seq_printf(m, "Render Power Well: %s\n",
                        (gen9_powergate_status &
                         GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
@@ -1620,12 +1594,11 @@ static int gen6_drpc_info(struct seq_file *m)
 
 static int i915_drpc_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return vlv_drpc_info(m);
-       else if (INTEL_INFO(dev)->gen >= 6)
+       else if (INTEL_GEN(dev_priv) >= 6)
                return gen6_drpc_info(m);
        else
                return ironlake_drpc_info(m);
@@ -1633,9 +1606,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 
 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
        seq_printf(m, "FB tracking busy bits: 0x%08x\n",
                   dev_priv->fb_tracking.busy_bits);
@@ -1648,11 +1619,9 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-       if (!HAS_FBC(dev)) {
+       if (!HAS_FBC(dev_priv)) {
                seq_puts(m, "FBC unsupported on this chipset\n");
                return 0;
        }
@@ -1666,7 +1635,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
                seq_printf(m, "FBC disabled: %s\n",
                           dev_priv->fbc.no_fbc_reason);
 
-       if (INTEL_INFO(dev_priv)->gen >= 7)
+       if (INTEL_GEN(dev_priv) >= 7)
                seq_printf(m, "Compressing: %s\n",
                           yesno(I915_READ(FBC_STATUS2) &
                                 FBC_COMPRESSION_MASK));
@@ -1679,10 +1648,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 
 static int i915_fbc_fc_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
-       if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+       if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;
 
        *val = dev_priv->fbc.false_color;
@@ -1692,11 +1660,10 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 
 static int i915_fbc_fc_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 reg;
 
-       if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+       if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
                return -ENODEV;
 
        mutex_lock(&dev_priv->fbc.lock);
@@ -1718,11 +1685,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
 
 static int i915_ips_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-       if (!HAS_IPS(dev)) {
+       if (!HAS_IPS(dev_priv)) {
                seq_puts(m, "not supported\n");
                return 0;
        }
@@ -1732,7 +1697,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
        seq_printf(m, "Enabled by kernel parameter: %s\n",
                   yesno(i915.enable_ips));
 
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8) {
                seq_puts(m, "Currently: unknown\n");
        } else {
                if (I915_READ(IPS_CTL) & IPS_ENABLE)
@@ -1748,23 +1713,21 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        bool sr_enabled = false;
 
        intel_runtime_pm_get(dev_priv);
 
-       if (HAS_PCH_SPLIT(dev))
+       if (HAS_PCH_SPLIT(dev_priv))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-       else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
-                IS_I945G(dev) || IS_I945GM(dev))
+       else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+                IS_I945G(dev_priv) || IS_I945GM(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-       else if (IS_I915GM(dev))
+       else if (IS_I915GM(dev_priv))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
-       else if (IS_PINEVIEW(dev))
+       else if (IS_PINEVIEW(dev_priv))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
-       else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
        intel_runtime_pm_put(dev_priv);
@@ -1777,13 +1740,12 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 static int i915_emon_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        unsigned long temp, chipset, gfx;
        int ret;
 
-       if (!IS_GEN5(dev))
+       if (!IS_GEN5(dev_priv))
                return -ENODEV;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1805,14 +1767,12 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int ret = 0;
        int gpu_freq, ia_freq;
        unsigned int max_gpu_freq, min_gpu_freq;
 
-       if (!HAS_CORE_RING_FREQ(dev)) {
+       if (!HAS_LLC(dev_priv)) {
                seq_puts(m, "unsupported on this chipset\n");
                return 0;
        }
@@ -1823,7 +1783,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        if (ret)
                goto out;
 
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq =
                        dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1843,7 +1803,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                                       &ia_freq);
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                           intel_gpu_freq(dev_priv, (gpu_freq *
-                               (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+                               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
                                 GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
@@ -1858,9 +1818,8 @@ out:
 
 static int i915_opregion(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;
 
@@ -1879,10 +1838,7 @@ out:
 
 static int i915_vbt(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_opregion *opregion = &dev_priv->opregion;
+       struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 
        if (opregion->vbt)
                seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -1892,8 +1848,8 @@ static int i915_vbt(struct seq_file *m, void *unused)
 
 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_framebuffer *fbdev_fb = NULL;
        struct drm_framebuffer *drm_fb;
        int ret;
@@ -1903,8 +1859,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-       if (to_i915(dev)->fbdev) {
-               fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+       if (dev_priv->fbdev) {
+               fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
 
                seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
                           fbdev_fb->base.width,
@@ -1949,9 +1905,8 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 
 static int i915_context_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        int ret;
@@ -2043,9 +1998,8 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 
 static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        int ret;
@@ -2070,9 +2024,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 static int i915_execlists(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
        u32 status_pointer;
        u8 read_pointer;
@@ -2107,7 +2060,7 @@ static int i915_execlists(struct seq_file *m, void *data)
                status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
                seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-               read_pointer = engine->next_context_status_buffer;
+               read_pointer = GEN8_CSB_READ_PTR(status_pointer);
                write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
                if (read_pointer > write_pointer)
                        write_pointer += GEN8_CSB_ENTRIES;
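
Taking the read pointer from GEN8_CSB_READ_PTR(status_pointer) instead of the driver's cached engine->next_context_status_buffer means both pointers come from the same register snapshot. The wrap handling above then gives the number of pending context-status entries directly: with GEN8_CSB_ENTRIES entries (6 on these parts), read_pointer = 5 and write_pointer = 2 yields 2 + 6 - 5 = 3 entries to print.
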
@@ -2173,9 +2126,8 @@ static const char *swizzle_string(unsigned swizzle)
 
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2188,7 +2140,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-       if (IS_GEN3(dev) || IS_GEN4(dev)) {
+       if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2197,7 +2149,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                           I915_READ16(C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           I915_READ16(C1DRB3));
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -2206,7 +2158,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
-               if (INTEL_INFO(dev)->gen >= 8)
+               if (INTEL_GEN(dev_priv) >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   I915_READ(GAMTARBMODE));
                else
@@ -2246,9 +2198,9 @@ static int per_file_ctx(int id, void *ptr, void *data)
        return 0;
 }
 
-static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen8_ppgtt_info(struct seq_file *m,
+                           struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;
@@ -2267,9 +2219,9 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
        }
 }
 
-static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen6_ppgtt_info(struct seq_file *m,
+                           struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        if (IS_GEN6(dev_priv))
@@ -2301,22 +2253,23 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_file *file;
+       int ret;
 
-       int ret = mutex_lock_interruptible(&dev->struct_mutex);
+       mutex_lock(&dev->filelist_mutex);
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
-               return ret;
+               goto out_unlock;
+
        intel_runtime_pm_get(dev_priv);
 
-       if (INTEL_INFO(dev)->gen >= 8)
-               gen8_ppgtt_info(m, dev);
-       else if (INTEL_INFO(dev)->gen >= 6)
-               gen6_ppgtt_info(m, dev);
+       if (INTEL_GEN(dev_priv) >= 8)
+               gen8_ppgtt_info(m, dev_priv);
+       else if (INTEL_GEN(dev_priv) >= 6)
+               gen6_ppgtt_info(m, dev_priv);
 
-       mutex_lock(&dev->filelist_mutex);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
                struct task_struct *task;
@@ -2324,19 +2277,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
                task = get_pid_task(file->pid, PIDTYPE_PID);
                if (!task) {
                        ret = -ESRCH;
-                       goto out_unlock;
+                       goto out_rpm;
                }
                seq_printf(m, "\nproc: %s\n", task->comm);
                put_task_struct(task);
                idr_for_each(&file_priv->context_idr, per_file_ctx,
                             (void *)(unsigned long)m);
        }
-out_unlock:
-       mutex_unlock(&dev->filelist_mutex);
 
+out_rpm:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
-
+out_unlock:
+       mutex_unlock(&dev->filelist_mutex);
        return ret;
 }
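
Note the lock-ordering change in i915_ppgtt_info(): filelist_mutex is now taken outside struct_mutex, and each error label releases exactly the locks acquired so far. A reduced sketch of the resulting discipline (runtime-pm calls omitted):

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;	/* only filelist_mutex held */

	/* ... walk dev->filelist with both locks held ... */

	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);

This keeps dev->filelist stable for the whole dump instead of only around the loop, at the cost of holding filelist_mutex across the PPGTT register reads.
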
 
@@ -2367,9 +2320,8 @@ static const char *rps_power_to_str(unsigned int power)
 
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_file *file;
 
        seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
@@ -2437,12 +2389,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 
 static int i915_llc(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const bool edram = INTEL_GEN(dev_priv) > 8;
 
-       seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+       seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
        seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
                   intel_uncore_edram_size(dev_priv)/1024/1024);
 
@@ -2451,8 +2401,7 @@ static int i915_llc(struct seq_file *m, void *data)
 
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        u32 tmp, i;
 
@@ -2523,9 +2472,8 @@ static void i915_guc_client_info(struct seq_file *m,
 
 static int i915_guc_info(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_guc guc;
        struct i915_guc_client client = {};
        struct intel_engine_cs *engine;
@@ -2574,9 +2522,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
 static int i915_guc_log_dump(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_i915_gem_object *obj;
        int i = 0, pg;
 
@@ -2602,15 +2548,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 psrperf = 0;
        u32 stat[3];
        enum pipe pipe;
        bool enabled = false;
 
-       if (!HAS_PSR(dev)) {
+       if (!HAS_PSR(dev_priv)) {
                seq_puts(m, "PSR not supported\n");
                return 0;
        }
@@ -2627,7 +2571,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        seq_printf(m, "Re-enable work scheduled: %s\n",
                   yesno(work_busy(&dev_priv->psr.work.work)));
 
-       if (HAS_DDI(dev))
+       if (HAS_DDI(dev_priv))
                enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
        else {
                for_each_pipe(dev_priv, pipe) {
@@ -2644,7 +2588,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
        seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
 
-       if (!HAS_DDI(dev))
+       if (!HAS_DDI(dev_priv))
                for_each_pipe(dev_priv, pipe) {
                        if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
                            (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
@@ -2656,7 +2600,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
         * VLV/CHV PSR has no kind of performance counter
         * SKL+ Perf counter is reset to 0 everytime DC state is entered
         */
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                psrperf = I915_READ(EDP_PSR_PERF_CNT) &
                        EDP_PSR_PERF_CNT_MASK;
 
@@ -2670,8 +2614,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 static int i915_sink_crc(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *connector;
        struct intel_dp *intel_dp = NULL;
        int ret;
@@ -2710,13 +2654,11 @@ out:
 
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u64 power;
        u32 units;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
        intel_runtime_pm_get(dev_priv);
@@ -2736,9 +2678,8 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 
 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        if (!HAS_RUNTIME_PM(dev_priv))
                seq_puts(m, "Runtime power management not supported\n");
@@ -2748,22 +2689,20 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
                   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
        seq_printf(m, "Usage count: %d\n",
-                  atomic_read(&dev->dev->power.usage_count));
+                  atomic_read(&dev_priv->drm.dev->power.usage_count));
 #else
        seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
        seq_printf(m, "PCI device power state: %s [%d]\n",
-                  pci_power_name(dev_priv->drm.pdev->current_state),
-                  dev_priv->drm.pdev->current_state);
+                  pci_power_name(pdev->current_state),
+                  pdev->current_state);
 
        return 0;
 }
 
 static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        int i;
 
@@ -2796,12 +2735,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 
 static int i915_dmc_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_csr *csr;
 
-       if (!HAS_CSR(dev)) {
+       if (!HAS_CSR(dev_priv)) {
                seq_puts(m, "not supported\n");
                return 0;
        }
@@ -2819,12 +2756,12 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));
 
-       if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
+       if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
                seq_printf(m, "DC3 -> DC5 count: %d\n",
                           I915_READ(SKL_CSR_DC3_DC5_COUNT));
                seq_printf(m, "DC5 -> DC6 count: %d\n",
                           I915_READ(SKL_CSR_DC5_DC6_COUNT));
-       } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
+       } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
                seq_printf(m, "DC3 -> DC5 count: %d\n",
                           I915_READ(BXT_CSR_DC3_DC5_COUNT));
        }
@@ -2861,8 +2798,8 @@ static void intel_encoder_info(struct seq_file *m,
                               struct intel_crtc *intel_crtc,
                               struct intel_encoder *intel_encoder)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_connector *intel_connector;
        struct drm_encoder *encoder;
@@ -2888,8 +2825,8 @@ static void intel_encoder_info(struct seq_file *m,
 
 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_encoder *intel_encoder;
        struct drm_plane_state *plane_state = crtc->primary->state;
@@ -2923,6 +2860,9 @@ static void intel_dp_info(struct seq_file *m,
        seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
        if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_info(m, &intel_connector->panel);
+
+       drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+                               &intel_dp->aux);
 }
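
intel_dp_info() additionally gains a dump of DP branch-device details: drm_dp_downstream_debug() reports on the downstream ports using the cached DPCD and the connector's AUX channel.
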
 
 static void intel_hdmi_info(struct seq_file *m,
@@ -2987,12 +2927,11 @@ static void intel_connector_info(struct seq_file *m,
                intel_seq_print_mode(m, 2, mode);
 }
 
-static bool cursor_active(struct drm_device *dev, int pipe)
+static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 state;
 
-       if (IS_845G(dev) || IS_I865G(dev))
+       if (IS_845G(dev_priv) || IS_I865G(dev_priv))
                state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
        else
                state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3000,9 +2939,9 @@ static bool cursor_active(struct drm_device *dev, int pipe)
        return state;
 }
 
-static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+static bool cursor_position(struct drm_i915_private *dev_priv,
+                           int pipe, int *x, int *y)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pos;
 
        pos = I915_READ(CURPOS(pipe));
@@ -3015,7 +2954,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
        if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
                *y = -*y;
 
-       return cursor_active(dev, pipe);
+       return cursor_active(dev_priv, pipe);
 }
 
 static const char *plane_type(enum drm_plane_type type)
@@ -3058,8 +2997,8 @@ static const char *plane_rotation(unsigned int rotation)
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_plane *intel_plane;
 
        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
@@ -3130,9 +3069,8 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 
 static int i915_display_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        struct drm_connector *connector;
 
@@ -3156,7 +3094,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
                if (pipe_config->base.active) {
                        intel_crtc_info(m, crtc);
 
-                       active = cursor_position(dev, crtc->pipe, &x, &y);
+                       active = cursor_position(dev_priv, crtc->pipe, &x, &y);
                        seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
                                   yesno(crtc->cursor_base),
                                   x, y, crtc->base.cursor->state->crtc_w,
@@ -3185,11 +3123,10 @@ static int i915_display_info(struct seq_file *m, void *unused)
 
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
-       int num_rings = INTEL_INFO(dev)->num_rings;
+       int num_rings = INTEL_INFO(dev_priv)->num_rings;
        enum intel_engine_id id;
        int j, ret;
 
@@ -3203,7 +3140,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
                return ret;
        intel_runtime_pm_get(dev_priv);
 
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev_priv)) {
                struct page *page;
                uint64_t *seqno;
 
@@ -3258,9 +3195,8 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        int i;
 
        drm_modeset_lock_all(dev);
@@ -3288,9 +3224,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
        int i;
        int ret;
        struct intel_engine_cs *engine;
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_workarounds *workarounds = &dev_priv->workarounds;
        enum intel_engine_id id;
 
@@ -3326,15 +3261,14 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 static int i915_ddb_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_allocation *ddb;
        struct skl_ddb_entry *entry;
        enum pipe pipe;
        int plane;
 
-       if (INTEL_INFO(dev)->gen < 9)
+       if (INTEL_GEN(dev_priv) < 9)
                return 0;
 
        drm_modeset_lock_all(dev);
@@ -3364,7 +3298,8 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 }
 
 static void drrs_status_per_crtc(struct seq_file *m,
-               struct drm_device *dev, struct intel_crtc *intel_crtc)
+                                struct drm_device *dev,
+                                struct intel_crtc *intel_crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
@@ -3433,8 +3368,8 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
 static int i915_drrs_status(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *intel_crtc;
        int active_crtc_cnt = 0;
 
@@ -3457,14 +3392,14 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
 
 struct pipe_crc_info {
        const char *name;
-       struct drm_device *dev;
+       struct drm_i915_private *dev_priv;
        enum pipe pipe;
 };
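
pipe_crc_info now caches the drm_i915_private pointer directly, so the CRC file operations further down (open, release, read) stop round-tripping through minor->dev; i915_pipe_crc_create() fills the field in from to_i915(minor->dev).
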
 
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
        struct drm_connector *connector;
@@ -3493,10 +3428,10 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(info->dev);
+       struct drm_i915_private *dev_priv = info->dev_priv;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
-       if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+       if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
                return -ENODEV;
 
        spin_lock_irq(&pipe_crc->lock);
@@ -3517,7 +3452,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(info->dev);
+       struct drm_i915_private *dev_priv = info->dev_priv;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
        spin_lock_irq(&pipe_crc->lock);
@@ -3544,8 +3479,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
                   loff_t *pos)
 {
        struct pipe_crc_info *info = filep->private_data;
-       struct drm_device *dev = info->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = info->dev_priv;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
        char buf[PIPE_CRC_BUFFER_LEN];
        int n_entries;
@@ -3641,11 +3575,11 @@ static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
                                enum pipe pipe)
 {
-       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = to_i915(minor->dev);
        struct dentry *ent;
        struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
 
-       info->dev = dev;
+       info->dev_priv = dev_priv;
        ent = debugfs_create_file(info->name, S_IRUGO, root, info,
                                  &i915_pipe_crc_fops);
        if (!ent)
@@ -3675,8 +3609,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        int i;
 
        for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3688,9 +3621,7 @@ static int display_crc_ctl_show(struct seq_file *m, void *data)
 
 static int display_crc_ctl_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
-
-       return single_open(file, display_crc_ctl_show, dev);
+       return single_open(file, display_crc_ctl_show, inode->i_private);
 }
 
 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -3713,9 +3644,11 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
        return 0;
 }
 
-static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe,
                                     enum intel_pipe_crc_source *source)
 {
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
        struct intel_crtc *crtc;
        struct intel_digital_port *dig_port;
@@ -3765,16 +3698,15 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
        return ret;
 }
 
-static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-               int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
                if (ret)
                        return ret;
        }
@@ -3792,7 +3724,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
                need_stable_symbols = true;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_D:
-               if (!IS_CHERRYVIEW(dev))
+               if (!IS_CHERRYVIEW(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
                need_stable_symbols = true;
@@ -3836,16 +3768,15 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
        return 0;
 }
 
-static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                 enum pipe pipe,
                                 enum intel_pipe_crc_source *source,
                                 uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-               int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+               int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
                if (ret)
                        return ret;
        }
@@ -3855,24 +3786,24 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
                break;
        case INTEL_PIPE_CRC_SOURCE_TV:
-               if (!SUPPORTS_TV(dev))
+               if (!SUPPORTS_TV(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_B:
-               if (!IS_G4X(dev))
+               if (!IS_G4X(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
                need_stable_symbols = true;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_C:
-               if (!IS_G4X(dev))
+               if (!IS_G4X(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
                need_stable_symbols = true;
                break;
        case INTEL_PIPE_CRC_SOURCE_DP_D:
-               if (!IS_G4X(dev))
+               if (!IS_G4X(dev_priv))
                        return -EINVAL;
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
                need_stable_symbols = true;
@@ -3896,7 +3827,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
        if (need_stable_symbols) {
                uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
-               WARN_ON(!IS_G4X(dev));
+               WARN_ON(!IS_G4X(dev_priv));
 
                I915_WRITE(PORT_DFT_I9XX,
                           I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
@@ -3912,10 +3843,9 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
        return 0;
 }
 
-static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        switch (pipe) {
@@ -3937,10 +3867,9 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
 
 }
 
-static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        if (pipe == PIPE_A)
@@ -3981,9 +3910,10 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
        return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+                                       bool enable)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
        struct intel_crtc_state *pipe_config;
@@ -4017,7 +3947,7 @@ out:
                drm_atomic_state_free(state);
 }
 
-static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
@@ -4033,8 +3963,8 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
                break;
        case INTEL_PIPE_CRC_SOURCE_PF:
-               if (IS_HASWELL(dev) && pipe == PIPE_A)
-                       hsw_trans_edp_pipe_A_crc_wa(dev, true);
+               if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+                       hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
 
                *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
                break;
@@ -4048,13 +3978,14 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
        return 0;
 }
 
-static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+                              enum pipe pipe,
                               enum intel_pipe_crc_source source)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-       struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
-                                                                       pipe));
+       struct intel_crtc *crtc =
+                       to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
        enum intel_display_power_domain power_domain;
        u32 val = 0; /* shut up gcc */
        int ret;
@@ -4072,16 +4003,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                return -EIO;
        }
 
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                ret = i8xx_pipe_crc_ctl_reg(&source, &val);
-       else if (INTEL_INFO(dev)->gen < 5)
-               ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-       else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-       else if (IS_GEN5(dev) || IS_GEN6(dev))
+       else if (INTEL_GEN(dev_priv) < 5)
+               ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+       else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
                ret = ilk_pipe_crc_ctl_reg(&source, &val);
        else
-               ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+               ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
 
        if (ret != 0)
                goto out;
@@ -4145,12 +4076,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 
                kfree(entries);
 
-               if (IS_G4X(dev))
-                       g4x_undo_pipe_scramble_reset(dev, pipe);
-               else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-                       vlv_undo_pipe_scramble_reset(dev, pipe);
-               else if (IS_HASWELL(dev) && pipe == PIPE_A)
-                       hsw_trans_edp_pipe_A_crc_wa(dev, false);
+               if (IS_G4X(dev_priv))
+                       g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+               else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+               else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+                       hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
 
                hsw_enable_ips(crtc);
        }
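
When the CRC source is cleared again, the per-platform side effects applied at enable time are unwound with the same dev_priv-based helpers: the G4X and VLV/CHV stable-symbol scrambling resets and the Haswell pipe-A transcoder workaround.
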
@@ -4254,7 +4185,8 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
        return -EINVAL;
 }
 
-static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+                                char *buf, size_t len)
 {
 #define N_WORDS 3
        int n_words;
@@ -4285,14 +4217,14 @@ static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
                return -EINVAL;
        }
 
-       return pipe_crc_set_source(dev, pipe, source);
+       return pipe_crc_set_source(dev_priv, pipe, source);
 }
 
 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = m->private;
        char *tmpbuf;
        int ret;
 
@@ -4315,7 +4247,7 @@ static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
        }
        tmpbuf[len] = '\0';
 
-       ret = display_crc_ctl_parse(dev, tmpbuf, len);
+       ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
 
 out:
        kfree(tmpbuf);
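
For context, this is the documented pipe CRC control interface: the write payload is a three-word command such as "pipe A auto" (object, pipe, source), which display_crc_ctl_parse() tokenizes into exactly N_WORDS entries before handing the parsed pipe and source to pipe_crc_set_source().
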
@@ -4336,8 +4268,8 @@ static const struct file_operations i915_display_crc_ctl_fops = {
 };
 
 static ssize_t i915_displayport_test_active_write(struct file *file,
-                                           const char __user *ubuf,
-                                           size_t len, loff_t *offp)
+                                                 const char __user *ubuf,
+                                                 size_t len, loff_t *offp)
 {
        char *input_buffer;
        int status = 0;
@@ -4367,7 +4299,6 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4405,7 +4336,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
        struct intel_dp *intel_dp;
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4425,11 +4355,12 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 }
 
 static int i915_displayport_test_active_open(struct inode *inode,
-                                      struct file *file)
+                                            struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       return single_open(file, i915_displayport_test_active_show, dev);
+       return single_open(file, i915_displayport_test_active_show,
+                          &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_active_fops = {
@@ -4449,7 +4380,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
        struct intel_dp *intel_dp;
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4465,11 +4395,12 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
        return 0;
 }
 static int i915_displayport_test_data_open(struct inode *inode,
-                                      struct file *file)
+                                          struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       return single_open(file, i915_displayport_test_data_show, dev);
+       return single_open(file, i915_displayport_test_data_show,
+                          &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_data_fops = {
@@ -4488,7 +4419,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
        struct intel_dp *intel_dp;
 
        list_for_each_entry(connector, connector_list, head) {
-
                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
@@ -4507,9 +4437,10 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 static int i915_displayport_test_type_open(struct inode *inode,
                                       struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       return single_open(file, i915_displayport_test_type_show, dev);
+       return single_open(file, i915_displayport_test_type_show,
+                          &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_type_fops = {
@@ -4522,13 +4453,14 @@ static const struct file_operations i915_displayport_test_type_fops = {
 
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
-       struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = m->private;
+       struct drm_device *dev = &dev_priv->drm;
        int level;
        int num_levels;
 
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
-       else if (IS_VALLEYVIEW(dev))
+       else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else
                num_levels = ilk_wm_max_level(dev) + 1;
@@ -4542,8 +4474,8 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv
                 */
-               if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
-                   IS_CHERRYVIEW(dev))
+               if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
+                   IS_CHERRYVIEW(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;
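
The multipliers normalize every latency to tenths of a microsecond before printing: a gen9/vlv/chv value is already in microseconds, so *10 converts it, while an ILK-era WM1+ value in 0.5 us units needs *5 (e.g. a raw 4 becomes 20, i.e. 2.0 us). Only the platform predicates move to dev_priv here; the unit handling is unchanged.
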
@@ -4557,14 +4489,13 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        const uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.pri_latency;
+               latencies = dev_priv->wm.pri_latency;
 
        wm_latency_show(m, latencies);
 
@@ -4573,14 +4504,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        const uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.spr_latency;
+               latencies = dev_priv->wm.spr_latency;
 
        wm_latency_show(m, latencies);
 
@@ -4589,14 +4519,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        const uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.cur_latency;
+               latencies = dev_priv->wm.cur_latency;
 
        wm_latency_show(m, latencies);
 
@@ -4605,48 +4534,49 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
 
 static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (INTEL_INFO(dev)->gen < 5)
+       if (INTEL_GEN(dev_priv) < 5)
                return -ENODEV;
 
-       return single_open(file, pri_wm_latency_show, dev);
+       return single_open(file, pri_wm_latency_show, dev_priv);
 }
 
 static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (HAS_GMCH_DISPLAY(dev))
+       if (HAS_GMCH_DISPLAY(dev_priv))
                return -ENODEV;
 
-       return single_open(file, spr_wm_latency_show, dev);
+       return single_open(file, spr_wm_latency_show, dev_priv);
 }
 
 static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (HAS_GMCH_DISPLAY(dev))
+       if (HAS_GMCH_DISPLAY(dev_priv))
                return -ENODEV;
 
-       return single_open(file, cur_wm_latency_show, dev);
+       return single_open(file, cur_wm_latency_show, dev_priv);
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                size_t len, loff_t *offp, uint16_t wm[8])
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = m->private;
+       struct drm_device *dev = &dev_priv->drm;
        uint16_t new[8] = { 0 };
        int num_levels;
        int level;
        int ret;
        char tmp[32];
 
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
-       else if (IS_VALLEYVIEW(dev))
+       else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else
                num_levels = ilk_wm_max_level(dev) + 1;
@@ -4680,14 +4610,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.pri_latency;
+               latencies = dev_priv->wm.pri_latency;
 
        return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4696,14 +4625,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.spr_latency;
+               latencies = dev_priv->wm.spr_latency;
 
        return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4712,14 +4640,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
 {
        struct seq_file *m = file->private_data;
-       struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = m->private;
        uint16_t *latencies;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                latencies = dev_priv->wm.skl_latency;
        else
-               latencies = to_i915(dev)->wm.cur_latency;
+               latencies = dev_priv->wm.cur_latency;
 
        return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4754,8 +4681,7 @@ static const struct file_operations i915_cur_wm_latency_fops = {
 static int
 i915_wedged_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        *val = i915_terminally_wedged(&dev_priv->gpu_error);
 
@@ -4765,8 +4691,7 @@ i915_wedged_get(void *data, u64 *val)
 static int
 i915_wedged_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        /*
         * There is no safeguard against this debugfs entry colliding
@@ -4796,8 +4721,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
 static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        *val = dev_priv->gpu_error.missed_irq_rings;
        return 0;
@@ -4806,8 +4730,8 @@ i915_ring_missed_irq_get(void *data, u64 *val)
 static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        /* Lock against concurrent debugfs callers */
@@ -4827,8 +4751,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
 static int
 i915_ring_test_irq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        *val = dev_priv->gpu_error.test_irq_rings;
 
@@ -4838,8 +4761,7 @@ i915_ring_test_irq_get(void *data, u64 *val)
 static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        val &= INTEL_INFO(dev_priv)->ring_mask;
        DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
@@ -4871,8 +4793,8 @@ i915_drop_caches_get(void *data, u64 *val)
 static int
 i915_drop_caches_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4884,7 +4806,9 @@ i915_drop_caches_set(void *data, u64 val)
                return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gem_wait_for_idle(dev_priv, true);
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
                if (ret)
                        goto unlock;
        }
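
i915_gem_wait_for_idle() now takes an explicit flags word in place of the old interruptible bool: I915_WAIT_INTERRUPTIBLE keeps the wait signal-abortable, and I915_WAIT_LOCKED records that the caller already holds struct_mutex, which this path acquired just above.
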
@@ -4911,10 +4835,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
 static int
 i915_max_freq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
        *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
@@ -4924,12 +4847,11 @@ i915_max_freq_get(void *data, u64 *val)
 static int
 i915_max_freq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 hw_max, hw_min;
        int ret;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
 
        DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
@@ -4967,8 +4889,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
 static int
 i915_min_freq_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
 
        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;
@@ -4980,8 +4901,7 @@ i915_min_freq_get(void *data, u64 *val)
 static int
 i915_min_freq_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 hw_max, hw_min;
        int ret;
 
@@ -5002,7 +4922,8 @@ i915_min_freq_set(void *data, u64 val)
        hw_max = dev_priv->rps.max_freq;
        hw_min = dev_priv->rps.min_freq;
 
-       if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+       if (val < hw_min ||
+           val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
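
The hunks above all apply one refactor: the debugfs file's private data now carries the drm_i915_private pointer itself, so each accessor casts data directly instead of bouncing through a drm_device and to_i915(). A minimal sketch of the resulting pattern, with hypothetical names (foo_get/foo_fops are illustrations, not part of this diff):

        static int foo_get(void *data, u64 *val)
        {
                struct drm_i915_private *dev_priv = data;

                *val = 0;       /* read some per-device state here */
                return 0;
        }
        DEFINE_SIMPLE_ATTRIBUTE(foo_fops, foo_get, NULL, "%llu\n");

        /* registration now passes dev_priv, not the drm_device */
        debugfs_create_file("i915_foo", S_IRUSR, root, dev_priv, &foo_fops);
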
@@ -5023,12 +4944,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
 static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
+       struct drm_device *dev = &dev_priv->drm;
        u32 snpcr;
        int ret;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
                return -ENODEV;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -5039,7 +4960,7 @@ i915_cache_sharing_get(void *data, u64 *val)
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
        intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5049,11 +4970,10 @@ i915_cache_sharing_get(void *data, u64 *val)
 static int
 i915_cache_sharing_set(void *data, u64 val)
 {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        u32 snpcr;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+       if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
                return -ENODEV;
 
        if (val > 3)
@@ -5076,18 +4996,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
 
-struct sseu_dev_status {
-       unsigned int slice_total;
-       unsigned int subslice_total;
-       unsigned int subslice_per_slice;
-       unsigned int eu_total;
-       unsigned int eu_per_subslice;
-};
-
-static void cherryview_sseu_device_status(struct drm_device *dev,
-                                         struct sseu_dev_status *stat)
+static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
+                                         struct sseu_dev_info *sseu)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        int ss_max = 2;
        int ss;
        u32 sig1[ss_max], sig2[ss_max];
@@ -5104,28 +5015,27 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
                        /* skip disabled subslice */
                        continue;
 
-               stat->slice_total = 1;
-               stat->subslice_per_slice++;
+               sseu->slice_mask = BIT(0);
+               sseu->subslice_mask |= BIT(ss);
                eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
                         ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
-               stat->eu_total += eu_cnt;
-               stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+               sseu->eu_total += eu_cnt;
+               sseu->eu_per_subslice = max_t(unsigned int,
+                                             sseu->eu_per_subslice, eu_cnt);
        }
-       stat->subslice_total = stat->subslice_per_slice;
 }
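
A quick worked example of the EU counting above, with hypothetical fuse values: if sig1[ss] has only CHV_EU08_PG_ENABLE set and sig2[ss] is clear, then

        eu_cnt = 0 + 2 + 2 + 2 = 6

enabled EUs are attributed to this subslice, and eu_per_subslice becomes max(previous value, 6).
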
 
-static void gen9_sseu_device_status(struct drm_device *dev,
-                                   struct sseu_dev_status *stat)
+static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
+                                   struct sseu_dev_info *sseu)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        int s_max = 3, ss_max = 4;
        int s, ss;
        u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
 
        /* BXT has a single slice and at most 3 subslices. */
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                s_max = 1;
                ss_max = 3;
        }
@@ -5146,133 +5056,134 @@ static void gen9_sseu_device_status(struct drm_device *dev,
                     GEN9_PGCTL_SSB_EU311_ACK;
 
        for (s = 0; s < s_max; s++) {
-               unsigned int ss_cnt = 0;
-
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;
 
-               stat->slice_total++;
+               sseu->slice_mask |= BIT(s);
 
-               if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-                       ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+               if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+                       sseu->subslice_mask =
+                               INTEL_INFO(dev_priv)->sseu.subslice_mask;
 
                for (ss = 0; ss < ss_max; ss++) {
                        unsigned int eu_cnt;
 
-                       if (IS_BROXTON(dev) &&
-                           !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-                               /* skip disabled subslice */
-                               continue;
+                       if (IS_BROXTON(dev_priv)) {
+                               if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+                                       /* skip disabled subslice */
+                                       continue;
 
-                       if (IS_BROXTON(dev))
-                               ss_cnt++;
+                               sseu->subslice_mask |= BIT(ss);
+                       }
 
                        eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
                                               eu_mask[ss%2]);
-                       stat->eu_total += eu_cnt;
-                       stat->eu_per_subslice = max(stat->eu_per_subslice,
-                                                   eu_cnt);
+                       sseu->eu_total += eu_cnt;
+                       sseu->eu_per_subslice = max_t(unsigned int,
+                                                     sseu->eu_per_subslice,
+                                                     eu_cnt);
                }
-
-               stat->subslice_total += ss_cnt;
-               stat->subslice_per_slice = max(stat->subslice_per_slice,
-                                              ss_cnt);
        }
 }
 
-static void broadwell_sseu_device_status(struct drm_device *dev,
-                                        struct sseu_dev_status *stat)
+static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
+                                        struct sseu_dev_info *sseu)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int s;
        u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+       int s;
 
-       stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
+       sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
 
-       if (stat->slice_total) {
-               stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
-               stat->subslice_total = stat->slice_total *
-                                      stat->subslice_per_slice;
-               stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
-               stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
+       if (sseu->slice_mask) {
+               sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+               sseu->eu_per_subslice =
+                               INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+               sseu->eu_total = sseu->eu_per_subslice *
+                                sseu_subslice_total(sseu);
 
                /* subtract fused off EU(s) from enabled slice(s) */
-               for (s = 0; s < stat->slice_total; s++) {
-                       u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+               for (s = 0; s < fls(sseu->slice_mask); s++) {
+                       u8 subslice_7eu =
+                               INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
 
-                       stat->eu_total -= hweight8(subslice_7eu);
+                       sseu->eu_total -= hweight8(subslice_7eu);
                }
        }
 }
 
-static int i915_sseu_status(struct seq_file *m, void *unused)
+static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
+                                const struct sseu_dev_info *sseu)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
-       struct drm_device *dev = &dev_priv->drm;
-       struct sseu_dev_status stat;
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       const char *type = is_available_info ? "Available" : "Enabled";
 
-       if (INTEL_INFO(dev)->gen < 8)
-               return -ENODEV;
+       seq_printf(m, "  %s Slice Mask: %04x\n", type,
+                  sseu->slice_mask);
+       seq_printf(m, "  %s Slice Total: %u\n", type,
+                  hweight8(sseu->slice_mask));
+       seq_printf(m, "  %s Subslice Total: %u\n", type,
+                  sseu_subslice_total(sseu));
+       seq_printf(m, "  %s Subslice Mask: %04x\n", type,
+                  sseu->subslice_mask);
+       seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
+                  hweight8(sseu->subslice_mask));
+       seq_printf(m, "  %s EU Total: %u\n", type,
+                  sseu->eu_total);
+       seq_printf(m, "  %s EU Per Subslice: %u\n", type,
+                  sseu->eu_per_subslice);
+
+       if (!is_available_info)
+               return;
+
+       seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
+       if (HAS_POOLED_EU(dev_priv))
+               seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
 
-       seq_puts(m, "SSEU Device Info\n");
-       seq_printf(m, "  Available Slice Total: %u\n",
-                  INTEL_INFO(dev)->slice_total);
-       seq_printf(m, "  Available Subslice Total: %u\n",
-                  INTEL_INFO(dev)->subslice_total);
-       seq_printf(m, "  Available Subslice Per Slice: %u\n",
-                  INTEL_INFO(dev)->subslice_per_slice);
-       seq_printf(m, "  Available EU Total: %u\n",
-                  INTEL_INFO(dev)->eu_total);
-       seq_printf(m, "  Available EU Per Subslice: %u\n",
-                  INTEL_INFO(dev)->eu_per_subslice);
-       seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
-       if (HAS_POOLED_EU(dev))
-               seq_printf(m, "  Min EU in pool: %u\n",
-                          INTEL_INFO(dev)->min_eu_in_pool);
        seq_printf(m, "  Has Slice Power Gating: %s\n",
-                  yesno(INTEL_INFO(dev)->has_slice_pg));
+                  yesno(sseu->has_slice_pg));
        seq_printf(m, "  Has Subslice Power Gating: %s\n",
-                  yesno(INTEL_INFO(dev)->has_subslice_pg));
+                  yesno(sseu->has_subslice_pg));
        seq_printf(m, "  Has EU Power Gating: %s\n",
-                  yesno(INTEL_INFO(dev)->has_eu_pg));
+                  yesno(sseu->has_eu_pg));
+}
+
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct sseu_dev_info sseu;
+
+       if (INTEL_GEN(dev_priv) < 8)
+               return -ENODEV;
+
+       seq_puts(m, "SSEU Device Info\n");
+       i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
 
        seq_puts(m, "SSEU Device Status\n");
-       memset(&stat, 0, sizeof(stat));
+       memset(&sseu, 0, sizeof(sseu));
 
        intel_runtime_pm_get(dev_priv);
 
-       if (IS_CHERRYVIEW(dev)) {
-               cherryview_sseu_device_status(dev, &stat);
-       } else if (IS_BROADWELL(dev)) {
-               broadwell_sseu_device_status(dev, &stat);
-       } else if (INTEL_INFO(dev)->gen >= 9) {
-               gen9_sseu_device_status(dev, &stat);
+       if (IS_CHERRYVIEW(dev_priv)) {
+               cherryview_sseu_device_status(dev_priv, &sseu);
+       } else if (IS_BROADWELL(dev_priv)) {
+               broadwell_sseu_device_status(dev_priv, &sseu);
+       } else if (INTEL_GEN(dev_priv) >= 9) {
+               gen9_sseu_device_status(dev_priv, &sseu);
        }
 
        intel_runtime_pm_put(dev_priv);
 
-       seq_printf(m, "  Enabled Slice Total: %u\n",
-                  stat.slice_total);
-       seq_printf(m, "  Enabled Subslice Total: %u\n",
-                  stat.subslice_total);
-       seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
-                  stat.subslice_per_slice);
-       seq_printf(m, "  Enabled EU Total: %u\n",
-                  stat.eu_total);
-       seq_printf(m, "  Enabled EU Per Subslice: %u\n",
-                  stat.eu_per_subslice);
+       i915_print_sseu_info(m, false, &sseu);
 
        return 0;
 }
 
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return 0;
 
        intel_runtime_pm_get(dev_priv);
@@ -5283,10 +5194,9 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
-       struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = inode->i_private;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return 0;
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5303,12 +5213,11 @@ static const struct file_operations i915_forcewake_fops = {
 
 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 {
-       struct drm_device *dev = minor->dev;
        struct dentry *ent;
 
        ent = debugfs_create_file("i915_forcewake_user",
                                  S_IRUSR,
-                                 root, dev,
+                                 root, to_i915(minor->dev),
                                  &i915_forcewake_fops);
        if (!ent)
                return -ENOMEM;
@@ -5321,12 +5230,11 @@ static int i915_debugfs_create(struct dentry *root,
                               const char *name,
                               const struct file_operations *fops)
 {
-       struct drm_device *dev = minor->dev;
        struct dentry *ent;
 
        ent = debugfs_create_file(name,
                                  S_IRUGO | S_IWUSR,
-                                 root, dev,
+                                 root, to_i915(minor->dev),
                                  fops);
        if (!ent)
                return -ENOMEM;
@@ -5413,9 +5321,8 @@ static const struct i915_debugfs_files {
        {"i915_dp_test_active", &i915_displayport_test_active_fops}
 };
 
-void intel_display_crc_init(struct drm_device *dev)
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -5463,7 +5370,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
 
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+       drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
                                 1, minor);
 
        for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
@@ -5475,7 +5382,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
                struct drm_info_list *info_list =
-                       (struct drm_info_list *) i915_debugfs_files[i].fops;
+                       (struct drm_info_list *)i915_debugfs_files[i].fops;
 
                drm_debugfs_remove_files(info_list, 1, minor);
        }
@@ -5555,6 +5462,40 @@ static const struct file_operations i915_dpcd_fops = {
        .release = single_release,
 };
 
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct intel_dp *intel_dp =
+               enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+
+       if (connector->status != connector_status_connected)
+               return -ENODEV;
+
+       seq_printf(m, "Panel power up delay: %d\n",
+                  intel_dp->panel_power_up_delay);
+       seq_printf(m, "Panel power down delay: %d\n",
+                  intel_dp->panel_power_down_delay);
+       seq_printf(m, "Backlight on delay: %d\n",
+                  intel_dp->backlight_on_delay);
+       seq_printf(m, "Backlight off delay: %d\n",
+                  intel_dp->backlight_off_delay);
+
+       return 0;
+}
+
+static int i915_panel_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, i915_panel_show, inode->i_private);
+}
+
+static const struct file_operations i915_panel_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_panel_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -5574,8 +5515,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
 
        if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-               debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
-                                   &i915_dpcd_fops);
+               debugfs_create_file("i915_dpcd", S_IRUGO, root,
+                                   connector, &i915_dpcd_fops);
+
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+               debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+                                   connector, &i915_panel_fops);
 
        return 0;
 }
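
The new file lands next to i915_dpcd under the connector's debugfs directory. A hypothetical userspace check (the dri/0 minor and the eDP-1 connector name are assumptions; both vary per system):

        #include <stdio.h>

        int main(void)
        {
                char line[128];
                FILE *f = fopen("/sys/kernel/debug/dri/0/eDP-1/i915_panel_timings", "r");

                if (!f)
                        return 1;
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);    /* panel power/backlight delays */
                fclose(f);
                return 0;
        }
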
index 13ae340..7f4e8ad 100644 (file)
@@ -77,7 +77,7 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
              const char *fmt, ...)
 {
        static bool shown_bug_once;
-       struct device *dev = dev_priv->drm.dev;
+       struct device *kdev = dev_priv->drm.dev;
        bool is_error = level[1] <= KERN_ERR[1];
        bool is_debug = level[1] == KERN_DEBUG[1];
        struct va_format vaf;
@@ -91,11 +91,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+       dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
                   __builtin_return_address(0), &vaf);
 
        if (is_error && !shown_bug_once) {
-               dev_notice(dev, "%s", FDO_BUG_MSG);
+               dev_notice(kdev, "%s", FDO_BUG_MSG);
                shown_bug_once = true;
        }
 
@@ -232,6 +232,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        drm_i915_getparam_t *param = data;
        int value;
 
@@ -242,13 +243,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
-               value = dev->pdev->device;
+               value = pdev->device;
                break;
        case I915_PARAM_REVISION:
-               value = dev->pdev->revision;
-               break;
-       case I915_PARAM_HAS_GEM:
-               value = 1;
+               value = pdev->revision;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs;
@@ -256,13 +254,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
-       case I915_PARAM_HAS_PAGEFLIPPING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXECBUF2:
-               /* depends on GEM */
-               value = 1;
-               break;
        case I915_PARAM_HAS_BSD:
                value = intel_engine_initialized(&dev_priv->engine[VCS]);
                break;
@@ -275,67 +266,34 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_BSD2:
                value = intel_engine_initialized(&dev_priv->engine[VCS2]);
                break;
-       case I915_PARAM_HAS_RELAXED_FENCING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_COHERENT_RINGS:
-               value = 1;
-               break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
-               value = INTEL_INFO(dev)->gen >= 4;
-               break;
-       case I915_PARAM_HAS_RELAXED_DELTA:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_GEN7_SOL_RESET:
-               value = 1;
+               value = INTEL_GEN(dev_priv) >= 4;
                break;
        case I915_PARAM_HAS_LLC:
-               value = HAS_LLC(dev);
+               value = HAS_LLC(dev_priv);
                break;
        case I915_PARAM_HAS_WT:
-               value = HAS_WT(dev);
+               value = HAS_WT(dev_priv);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
-               value = USES_PPGTT(dev);
-               break;
-       case I915_PARAM_HAS_WAIT_TIMEOUT:
-               value = 1;
+               value = USES_PPGTT(dev_priv);
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915.semaphores;
                break;
-       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
-               value = 1;
-               break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
-       case I915_PARAM_HAS_PINNED_BATCHES:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_NO_RELOC:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
-               value = 1;
-               break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version(dev_priv);
                break;
-       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
-               value = 1;
-               break;
-       case I915_PARAM_MMAP_VERSION:
-               value = 1;
-               break;
        case I915_PARAM_SUBSLICE_TOTAL:
-               value = INTEL_INFO(dev)->subslice_total;
+               value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
-               value = INTEL_INFO(dev)->eu_total;
+               value = INTEL_INFO(dev_priv)->sseu.eu_total;
                if (!value)
                        return -ENODEV;
                break;
@@ -343,16 +301,43 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
-               value = HAS_RESOURCE_STREAMER(dev);
-               break;
-       case I915_PARAM_HAS_EXEC_SOFTPIN:
-               value = 1;
+               value = HAS_RESOURCE_STREAMER(dev_priv);
                break;
        case I915_PARAM_HAS_POOLED_EU:
-               value = HAS_POOLED_EU(dev);
+               value = HAS_POOLED_EU(dev_priv);
                break;
        case I915_PARAM_MIN_EU_IN_POOL:
-               value = INTEL_INFO(dev)->min_eu_in_pool;
+               value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+               break;
+       case I915_PARAM_MMAP_GTT_VERSION:
+               /* Though we've started our numbering from 1, and so class all
+                * earlier versions as 0, in effect their value is undefined as
+                * the ioctl will report EINVAL for the unknown param!
+                */
+               value = i915_gem_mmap_gtt_version();
+               break;
+       case I915_PARAM_MMAP_VERSION:
+               /* Remember to bump this if the version changes! */
+       case I915_PARAM_HAS_GEM:
+       case I915_PARAM_HAS_PAGEFLIPPING:
+       case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+       case I915_PARAM_HAS_RELAXED_FENCING:
+       case I915_PARAM_HAS_COHERENT_RINGS:
+       case I915_PARAM_HAS_RELAXED_DELTA:
+       case I915_PARAM_HAS_GEN7_SOL_RESET:
+       case I915_PARAM_HAS_WAIT_TIMEOUT:
+       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+       case I915_PARAM_HAS_PINNED_BATCHES:
+       case I915_PARAM_HAS_EXEC_NO_RELOC:
+       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+       case I915_PARAM_HAS_EXEC_SOFTPIN:
+               /* For the time being all of these are always true;
+                * if some supported hardware does not have one of these
+                * features this value needs to be provided from
+                * INTEL_INFO(), a feature macro, or similar.
+                */
+               value = 1;
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
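
The parameters folded into the fall-through block above still report 1, so old userspace keeps working. A hypothetical libdrm-based probe of one of them (assumes libdrm headers are installed; error handling trimmed):

        #include <fcntl.h>
        #include <stdio.h>
        #include <xf86drm.h>
        #include <i915_drm.h>

        int main(void)
        {
                drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_EXEC_SOFTPIN };
                int value = 0;
                int fd = open("/dev/dri/card0", O_RDWR);

                gp.value = &value;
                if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
                        printf("softpin: %d\n", value); /* expect 1 */
                return 0;
        }
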
@@ -516,7 +501,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
-               pci_set_power_state(dev->pdev, PCI_D0);
+               pci_set_power_state(pdev, PCI_D0);
                i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
@@ -574,7 +559,6 @@ static void i915_gem_fini(struct drm_device *dev)
        }
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_reset(dev);
        i915_gem_cleanup_engines(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
@@ -585,6 +569,7 @@ static void i915_gem_fini(struct drm_device *dev)
 static int i915_load_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
 
        if (i915_inject_load_failure())
@@ -601,13 +586,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
-       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+       ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;
 
        intel_register_dsm_handler();
 
-       ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+       ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;
 
@@ -659,9 +644,9 @@ cleanup_irq:
 cleanup_csr:
        intel_csr_ucode_fini(dev_priv);
        intel_power_domains_fini(dev_priv);
-       vga_switcheroo_unregister_client(dev->pdev);
+       vga_switcheroo_unregister_client(pdev);
 cleanup_vga_client:
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
+       vga_client_register(pdev, NULL, NULL, NULL);
 out:
        return ret;
 }
@@ -849,7 +834,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_init_audio_hooks(dev_priv);
        i915_gem_load_init(&dev_priv->drm);
 
-       intel_display_crc_init(&dev_priv->drm);
+       intel_display_crc_init(dev_priv);
 
        intel_device_info_dump(dev_priv);
 
@@ -881,6 +866,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 static int i915_mmio_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int mmio_bar;
        int mmio_size;
 
@@ -897,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
                mmio_size = 512 * 1024;
        else
                mmio_size = 2 * 1024 * 1024;
-       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+       dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
        if (dev_priv->regs == NULL) {
                DRM_ERROR("failed to map registers\n");
 
@@ -913,9 +899,10 @@ static int i915_mmio_setup(struct drm_device *dev)
 static void i915_mmio_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        intel_teardown_mchbar(dev);
-       pci_iounmap(dev->pdev, dev_priv->regs);
+       pci_iounmap(pdev, dev_priv->regs);
 }
 
 /**
@@ -994,6 +981,7 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
  */
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_device *dev = &dev_priv->drm;
        int ret;
 
@@ -1032,11 +1020,11 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
                goto out_ggtt;
        }
 
-       pci_set_master(dev->pdev);
+       pci_set_master(pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev)) {
-               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+               ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");
 
@@ -1053,7 +1041,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * which also needs to be handled carefully.
         */
        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
-               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+               ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");
@@ -1083,7 +1071,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev)) {
-               if (pci_enable_msi(dev->pdev) < 0)
+               if (pci_enable_msi(pdev) < 0)
                        DRM_DEBUG_DRIVER("can't enable MSI");
        }
 
@@ -1101,10 +1089,10 @@ out_ggtt:
  */
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
-       if (dev->pdev->msi_enabled)
-               pci_disable_msi(dev->pdev);
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
 
        pm_qos_remove_request(&dev_priv->pm_qos);
        i915_ggtt_cleanup_hw(dev_priv);
@@ -1133,7 +1121,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
        /* Reveal our presence to userspace */
        if (drm_dev_register(dev, 0) == 0) {
                i915_debugfs_register(dev_priv);
-               i915_setup_sysfs(dev);
+               i915_setup_sysfs(dev_priv);
        } else
                DRM_ERROR("Failed to register driver for userspace access!\n");
 
@@ -1170,7 +1158,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
        acpi_video_unregister();
        intel_opregion_unregister(dev_priv);
 
-       i915_teardown_sysfs(&dev_priv->drm);
+       i915_teardown_sysfs(dev_priv);
        i915_debugfs_unregister(dev_priv);
        drm_dev_unregister(&dev_priv->drm);
 
@@ -1250,6 +1238,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        intel_runtime_pm_enable(dev_priv);
 
+       /* Everything is in place, we can now relax! */
+       DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+                driver.name, driver.major, driver.minor, driver.patchlevel,
+                driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
        intel_runtime_pm_put(dev_priv);
 
        return 0;
@@ -1274,6 +1267,7 @@ out_free_priv:
 void i915_driver_unload(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        intel_fbdev_fini(dev);
 
@@ -1302,8 +1296,8 @@ void i915_driver_unload(struct drm_device *dev)
        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
-       vga_switcheroo_unregister_client(dev->pdev);
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
+       vga_switcheroo_unregister_client(pdev);
+       vga_client_register(pdev, NULL, NULL, NULL);
 
        intel_csr_ucode_fini(dev_priv);
 
@@ -1400,6 +1394,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 static int i915_drm_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        pci_power_t opregion_target_state;
        int error;
 
@@ -1416,11 +1411,11 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        drm_kms_helper_poll_disable(dev);
 
-       pci_save_state(dev->pdev);
+       pci_save_state(pdev);
 
        error = i915_gem_suspend(dev);
        if (error) {
-               dev_err(&dev->pdev->dev,
+               dev_err(&pdev->dev,
                        "GEM idle failed, resume might fail\n");
                goto out;
        }
@@ -1462,9 +1457,10 @@ out:
        return error;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
-       struct drm_i915_private *dev_priv = to_i915(drm_dev);
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        bool fw_csr;
        int ret;
 
@@ -1498,7 +1494,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
                goto out;
        }
 
-       pci_disable_device(drm_dev->pdev);
+       pci_disable_device(pdev);
        /*
         * During hibernation on some platforms the BIOS may try to access
         * the device even though it's already in D3 and hang the machine. So
@@ -1512,7 +1508,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
         * Acer Aspire 1830T
         */
        if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
-               pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+               pci_set_power_state(pdev, PCI_D3hot);
 
        dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
 
@@ -1552,6 +1548,7 @@ static int i915_drm_resume(struct drm_device *dev)
        int ret;
 
        disable_rpm_wakeref_asserts(dev_priv);
+       intel_sanitize_gt_powersave(dev_priv);
 
        ret = i915_ggtt_enable_hw(dev_priv);
        if (ret)
@@ -1581,7 +1578,7 @@ static int i915_drm_resume(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-               atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+               i915_gem_set_wedged(dev_priv);
        }
        mutex_unlock(&dev->struct_mutex);
 
@@ -1629,6 +1626,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
 
        /*
@@ -1651,7 +1649,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * the device powered we can also remove the following set power state
         * call.
         */
-       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       ret = pci_set_power_state(pdev, PCI_D0);
        if (ret) {
                DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
                goto out;
@@ -1670,12 +1668,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * depend on the device enable refcount we can't anyway depend on them
         * disabling/enabling the device.
         */
-       if (pci_enable_device(dev->pdev)) {
+       if (pci_enable_device(pdev)) {
                ret = -EIO;
                goto out;
        }
 
-       pci_set_master(dev->pdev);
+       pci_set_master(pdev);
 
        disable_rpm_wakeref_asserts(dev_priv);
 
@@ -1727,8 +1725,10 @@ int i915_resume_switcheroo(struct drm_device *dev)
  * i915_reset - reset chip after a hang
  * @dev_priv: i915 device private to reset
  *
- * Reset the chip.  Useful if a hang is detected. Returns zero on successful
- * reset or otherwise an error code.
+ * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
  *
  * Procedure is fairly simple:
  *   - reset the chip using the reset reg
@@ -1738,29 +1738,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_i915_private *dev_priv)
+void i915_reset(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
        struct i915_gpu_error *error = &dev_priv->gpu_error;
-       unsigned reset_counter;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
+       lockdep_assert_held(&dev->struct_mutex);
 
-       /* Clear any previous failed attempts at recovery. Time to try again. */
-       atomic_andnot(I915_WEDGED, &error->reset_counter);
+       if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+               return;
 
-       /* Clear the reset-in-progress flag and increment the reset epoch. */
-       reset_counter = atomic_inc_return(&error->reset_counter);
-       if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
-               ret = -EIO;
-               goto error;
-       }
+       /* Clear any previous failed attempts at recovery. Time to try again. */
+       __clear_bit(I915_WEDGED, &error->flags);
+       error->reset_count++;
 
        pr_notice("drm/i915: Resetting chip after gpu hang\n");
-
-       i915_gem_reset(dev);
-
        ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
        if (ret) {
                if (ret != -ENODEV)
@@ -1770,6 +1763,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
                goto error;
        }
 
+       i915_gem_reset(dev_priv);
        intel_overlay_reset(dev_priv);
 
        /* Ok, now get things going again... */
@@ -1792,43 +1786,43 @@ int i915_reset(struct drm_i915_private *dev_priv)
                goto error;
        }
 
-       mutex_unlock(&dev->struct_mutex);
-
        /*
         * rps/rc6 re-init is necessary to restore state lost after the
         * reset and the re-install of gt irqs. Skip for ironlake per
         * previous concerns that it doesn't respond well to some forms
         * of re-init after reset.
         */
+       intel_sanitize_gt_powersave(dev_priv);
        intel_autoenable_gt_powersave(dev_priv);
 
-       return 0;
+wakeup:
+       wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+       return;
 
 error:
-       atomic_or(I915_WEDGED, &error->reset_counter);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       i915_gem_set_wedged(dev_priv);
+       goto wakeup;
 }
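
With the reset state now tracked as a bit in error->flags, the wake_up_bit() above pairs with waiters sleeping on that same bit. A minimal sketch of what such a waiter could look like (the call site is an illustration, not taken from this diff):

        /* sleep until I915_RESET_IN_PROGRESS clears again */
        if (wait_on_bit(&error->flags, I915_RESET_IN_PROGRESS,
                        TASK_INTERRUPTIBLE))
                return -EINTR;  /* caught a signal while waiting */
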
 
-static int i915_pm_suspend(struct device *dev)
+static int i915_pm_suspend(struct device *kdev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
 
-       if (!drm_dev) {
-               dev_err(dev, "DRM not initialized, aborting suspend.\n");
+       if (!dev) {
+               dev_err(kdev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_suspend(drm_dev);
+       return i915_drm_suspend(dev);
 }
 
-static int i915_pm_suspend_late(struct device *dev)
+static int i915_pm_suspend_late(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
        /*
         * We have a suspend ordering issue with the snd-hda driver also
@@ -1839,57 +1833,57 @@ static int i915_pm_suspend_late(struct device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_suspend_late(drm_dev, false);
+       return i915_drm_suspend_late(dev, false);
 }
 
-static int i915_pm_poweroff_late(struct device *dev)
+static int i915_pm_poweroff_late(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_suspend_late(drm_dev, true);
+       return i915_drm_suspend_late(dev, true);
 }
 
-static int i915_pm_resume_early(struct device *dev)
+static int i915_pm_resume_early(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_resume_early(drm_dev);
+       return i915_drm_resume_early(dev);
 }
 
-static int i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *kdev)
 {
-       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+       struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       return i915_drm_resume(drm_dev);
+       return i915_drm_resume(dev);
 }
 
 /* freeze: before creating the hibernation_image */
-static int i915_pm_freeze(struct device *dev)
+static int i915_pm_freeze(struct device *kdev)
 {
-       return i915_pm_suspend(dev);
+       return i915_pm_suspend(kdev);
 }
 
-static int i915_pm_freeze_late(struct device *dev)
+static int i915_pm_freeze_late(struct device *kdev)
 {
        int ret;
 
-       ret = i915_pm_suspend_late(dev);
+       ret = i915_pm_suspend_late(kdev);
        if (ret)
                return ret;
 
-       ret = i915_gem_freeze_late(dev_to_i915(dev));
+       ret = i915_gem_freeze_late(kdev_to_i915(kdev));
        if (ret)
                return ret;
 
@@ -1897,25 +1891,25 @@ static int i915_pm_freeze_late(struct device *dev)
 }
 
 /* thaw: called after creating the hibernation image, but before turning off. */
-static int i915_pm_thaw_early(struct device *dev)
+static int i915_pm_thaw_early(struct device *kdev)
 {
-       return i915_pm_resume_early(dev);
+       return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_thaw(struct device *kdev)
 {
-       return i915_pm_resume(dev);
+       return i915_pm_resume(kdev);
 }
 
 /* restore: called after loading the hibernation image. */
-static int i915_pm_restore_early(struct device *dev)
+static int i915_pm_restore_early(struct device *kdev)
 {
-       return i915_pm_resume_early(dev);
+       return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_restore(struct device *dev)
+static int i915_pm_restore(struct device *kdev)
 {
-       return i915_pm_resume(dev);
+       return i915_pm_resume(kdev);
 }
 
 /*
@@ -2277,9 +2271,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static int intel_runtime_suspend(struct device *device)
+static int intel_runtime_suspend(struct device *kdev)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
+       struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
@@ -2305,7 +2299,7 @@ static int intel_runtime_suspend(struct device *device)
                 * Bump the expiration timestamp, otherwise the suspend won't
                 * be rescheduled.
                 */
-               pm_runtime_mark_last_busy(device);
+               pm_runtime_mark_last_busy(kdev);
 
                return -EAGAIN;
        }
@@ -2384,9 +2378,9 @@ static int intel_runtime_suspend(struct device *device)
        return 0;
 }
 
-static int intel_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *kdev)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
+       struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
index e606905..4dd307e 100644 (file)
@@ -70,7 +70,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20160822"
+#define DRIVER_DATE            "20160919"
 
 #undef WARN_ON
 /* Many gcc versions seem not to see through this and fall over :( */
@@ -510,8 +510,12 @@ struct drm_i915_display_funcs {
                                         struct intel_initial_plane_config *);
        int (*crtc_compute_clock)(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
-       void (*crtc_enable)(struct drm_crtc *crtc);
-       void (*crtc_disable)(struct drm_crtc *crtc);
+       void (*crtc_enable)(struct intel_crtc_state *pipe_config,
+                           struct drm_atomic_state *old_state);
+       void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
+                            struct drm_atomic_state *old_state);
+       void (*update_crtcs)(struct drm_atomic_state *state,
+                            unsigned int *crtc_vblank_mask);
        void (*audio_codec_enable)(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode);
@@ -575,8 +579,6 @@ struct intel_uncore_funcs {
                                uint16_t val, bool trace);
        void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
                                uint32_t val, bool trace);
-       void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
-                               uint64_t val, bool trace);
 };
 
 struct intel_uncore {
@@ -637,7 +639,7 @@ struct intel_csr {
        func(is_i915g) sep \
        func(is_i945gm) sep \
        func(is_g33) sep \
-       func(need_gfx_hws) sep \
+       func(hws_needs_physical) sep \
        func(is_g4x) sep \
        func(is_pineview) sep \
        func(is_broadwater) sep \
@@ -652,6 +654,19 @@ struct intel_csr {
        func(is_kabylake) sep \
        func(is_preliminary) sep \
        func(has_fbc) sep \
+       func(has_psr) sep \
+       func(has_runtime_pm) sep \
+       func(has_csr) sep \
+       func(has_resource_streamer) sep \
+       func(has_rc6) sep \
+       func(has_rc6p) sep \
+       func(has_dp_mst) sep \
+       func(has_gmbus_irq) sep \
+       func(has_hw_contexts) sep \
+       func(has_logical_ring_contexts) sep \
+       func(has_l3_dpf) sep \
+       func(has_gmch_display) sep \
+       func(has_guc) sep \
        func(has_pipe_cxsr) sep \
        func(has_hotplug) sep \
        func(cursor_needs_physical) sep \
@@ -667,6 +682,24 @@ struct intel_csr {
 #define DEFINE_FLAG(name) u8 name:1
 #define SEP_SEMICOLON ;
 
+struct sseu_dev_info {
+       u8 slice_mask;
+       u8 subslice_mask;
+       u8 eu_total;
+       u8 eu_per_subslice;
+       u8 min_eu_in_pool;
+       /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+       u8 subslice_7eu[3];
+       u8 has_slice_pg:1;
+       u8 has_subslice_pg:1;
+       u8 has_eu_pg:1;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+       return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
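
A worked example of the helper with hypothetical masks: slice_mask = 0x1 (one slice) and subslice_mask = 0x7 (three subslices per slice) give hweight8(0x1) * hweight8(0x7) = 1 * 3 = 3, matching what the old subslice_total field would have held:

        /* hypothetical values, for illustration only */
        struct sseu_dev_info sseu = { .slice_mask = 0x1, .subslice_mask = 0x7 };
        unsigned int total = sseu_subslice_total(&sseu);        /* 3 */
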
 struct intel_device_info {
        u32 display_mmio_offset;
        u16 device_id;
@@ -677,6 +710,7 @@ struct intel_device_info {
        u8 ring_mask; /* Rings supported by the HW */
        u8 num_rings;
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+       u16 ddb_size; /* in blocks */
        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
        int trans_offsets[I915_MAX_TRANSCODERS];
@@ -684,17 +718,7 @@ struct intel_device_info {
        int cursor_offsets[I915_MAX_PIPES];
 
        /* Slice/subslice/EU info */
-       u8 slice_total;
-       u8 subslice_total;
-       u8 subslice_per_slice;
-       u8 eu_total;
-       u8 eu_per_subslice;
-       u8 min_eu_in_pool;
-       /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
-       u8 subslice_7eu[3];
-       u8 has_slice_pg:1;
-       u8 has_subslice_pg:1;
-       u8 has_eu_pg:1;
+       struct sseu_dev_info sseu;
 
        struct color_luts {
                u16 degamma_lut_size;
@@ -1161,6 +1185,7 @@ struct intel_gen6_power_mgmt {
        bool interrupts_enabled;
        u32 pm_iir;
 
+       /* PM interrupt bits that should never be masked */
        u32 pm_intr_keep;
 
        /* Frequencies are stored in potentially platform dependent multiples.
@@ -1334,7 +1359,7 @@ struct i915_gem_mm {
        bool interruptible;
 
        /* the indicator for dispatch video commands on two BSD rings */
-       unsigned int bsd_engine_dispatch_index;
+       atomic_t bsd_engine_dispatch_index;
 
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
@@ -1382,9 +1407,10 @@ struct i915_gpu_error {
         * State variable controlling the reset flow and count
         *
         * This is a counter which gets incremented when reset is triggered,
-        * and again when reset has been handled. So odd values (lowest bit set)
-        * means that reset is in progress and even values that
-        * (reset_counter >> 1):th reset was successfully completed.
+        *
+        * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
+        * meaning that any waiters holding onto the struct_mutex should
+        * relinquish the lock immediately in order for the reset to start.
         *
         * If reset is not completed successfully, the I915_WEDGED bit is
         * set meaning that hardware is terminally sour and there is no
@@ -1399,10 +1425,11 @@ struct i915_gpu_error {
         * naturally enforces the correct ordering between the bail-out of the
         * waiter and the gpu reset work code.
         */
-       atomic_t reset_counter;
+       unsigned long reset_count;
 
-#define I915_RESET_IN_PROGRESS_FLAG    1
-#define I915_WEDGED                    (1 << 31)
+       unsigned long flags;
+#define I915_RESET_IN_PROGRESS 0
+#define I915_WEDGED            (BITS_PER_LONG - 1)
 
        /**
         * Waitqueue to signal when a hang is detected. Used for waiters
@@ -1956,6 +1983,13 @@ struct drm_i915_private {
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state vlv_s0ix_state;
 
+       enum {
+               I915_SKL_SAGV_UNKNOWN = 0,
+               I915_SKL_SAGV_DISABLED,
+               I915_SKL_SAGV_ENABLED,
+               I915_SKL_SAGV_NOT_CONTROLLED
+       } skl_sagv_status;
+
        struct {
                /*
                 * Raw watermark latency values:
@@ -2010,6 +2044,7 @@ struct drm_i915_private {
 
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
+               void (*resume)(struct drm_i915_private *);
                void (*cleanup_engine)(struct intel_engine_cs *engine);
 
                /**
@@ -2057,9 +2092,9 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
        return container_of(dev, struct drm_i915_private, drm);
 }
 
-static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 {
-       return to_i915(dev_get_drvdata(dev));
+       return to_i915(dev_get_drvdata(kdev));
 }
 
 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
@@ -2082,13 +2117,16 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
                for_each_if (((id__) = (engine__)->id, \
                              intel_engine_initialized(engine__)))
 
+#define __mask_next_bit(mask) ({                                       \
+       int __idx = ffs(mask) - 1;                                      \
+       mask &= ~BIT(__idx);                                            \
+       __idx;                                                          \
+})
+
 /* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__) \
-       for ((engine__) = &(dev_priv__)->engine[0]; \
-            (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-            (engine__)++) \
-               for_each_if (((mask__) & intel_engine_flag(engine__)) && \
-                            intel_engine_initialized(engine__))
+#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
+       for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;        \
+            tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
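
A usage sketch for the reworked iterator; the mask and loop body are illustrative assumptions, and note the extra tmp__ scratch word the old version did not need:

        struct intel_engine_cs *engine;
        unsigned int tmp;

        /* visit only the render and blitter engines present in ring_mask */
        for_each_engine_masked(engine, dev_priv, RENDER_RING | BLT_RING, tmp)
                intel_engine_init_hangcheck(engine);
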
 
 enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
@@ -2549,7 +2587,7 @@ struct drm_i915_cmd_table {
                BUILD_BUG(); \
        __p; \
 })
-#define INTEL_INFO(p)  (&__I915__(p)->info)
+#define INTEL_INFO(p)  (&__I915__(p)->info)
 #define INTEL_GEN(p)   (INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
 
@@ -2716,10 +2754,10 @@ struct drm_i915_cmd_table {
 #define HAS_EDRAM(dev)         (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev)            ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
                                 HAS_EDRAM(dev))
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HWS_NEEDS_PHYSICAL(dev)        (INTEL_INFO(dev)->hws_needs_physical)
 
-#define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
+#define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
 #define USES_PPGTT(dev)                (i915.enable_ppgtt)
 #define USES_FULL_PPGTT(dev)   (i915.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev)     (i915.enable_ppgtt == 3)
@@ -2743,7 +2781,7 @@ struct drm_i915_cmd_table {
  * interrupt source and so prevents the other device from working properly.
  */
 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
@@ -2759,38 +2797,27 @@ struct drm_i915_cmd_table {
 
 #define HAS_IPS(dev)           (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
-#define HAS_DP_MST(dev)                (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-                                INTEL_INFO(dev)->gen >= 9)
+#define HAS_DP_MST(dev)        (INTEL_INFO(dev)->has_dp_mst)
 
 #define HAS_DDI(dev)           (INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)    (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev)           (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-                                IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
-                                IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-#define HAS_RUNTIME_PM(dev)    (IS_GEN6(dev) || IS_HASWELL(dev) || \
-                                IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
-                                IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
-                                IS_KABYLAKE(dev) || IS_BROXTON(dev))
-#define HAS_RC6(dev)           (INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev)          (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
-
-#define HAS_CSR(dev)   (IS_GEN9(dev))
+#define HAS_PSR(dev)           (INTEL_INFO(dev)->has_psr)
+#define HAS_RUNTIME_PM(dev)    (INTEL_INFO(dev)->has_runtime_pm)
+#define HAS_RC6(dev)           (INTEL_INFO(dev)->has_rc6)
+#define HAS_RC6p(dev)          (INTEL_INFO(dev)->has_rc6p)
+
+#define HAS_CSR(dev)   (INTEL_INFO(dev)->has_csr)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev)           (IS_GEN9(dev))
+#define HAS_GUC(dev)           (INTEL_INFO(dev)->has_guc)
 #define HAS_GUC_UCODE(dev)     (HAS_GUC(dev))
 #define HAS_GUC_SCHED(dev)     (HAS_GUC(dev))
 
-#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
-                                   INTEL_INFO(dev)->gen >= 8)
-
-#define HAS_CORE_RING_FREQ(dev)        (INTEL_INFO(dev)->gen >= 6 && \
-                                !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
-                                !IS_BROXTON(dev))
+#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
 
 #define HAS_POOLED_EU(dev)     (INTEL_INFO(dev)->has_pooled_eu)
 
@@ -2818,11 +2845,10 @@ struct drm_i915_cmd_table {
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
-                              IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
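These conversions share one pattern: the per-platform truth table moves out of open-coded gen/platform checks and into the static intel_device_info entries (kept in i915_pci.c in this era). A hedged sketch of what a platform entry then carries, with field names inferred from the macros above:

	/* Sketch only; the real tables live in i915_pci.c. */
	static const struct intel_device_info intel_skylake_info = {
		.gen = 9,
		.has_psr = 1,
		.has_runtime_pm = 1,
		.has_rc6 = 1,
		.has_dp_mst = 1,
		.has_gmbus_irq = 1,
		.has_csr = 1,
		.has_guc = 1,
		.has_resource_streamer = 1,
		.has_logical_ring_contexts = 1,
	};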
 
 #define GT_FREQUENCY_MULTIPLIER 50
@@ -2843,7 +2869,7 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-                               int enable_ppgtt);
+                               int enable_ppgtt);
 
 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
 
@@ -2861,7 +2887,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 #endif
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern int i915_reset(struct drm_i915_private *dev_priv);
+extern void i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -3197,8 +3223,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 }
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                        struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
                             struct drm_i915_gem_request *req,
                             unsigned int flags);
@@ -3207,6 +3231,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
+int i915_gem_mmap_gtt_version(void);
 
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
@@ -3219,54 +3244,35 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 
-static inline u32 i915_reset_counter(struct i915_gpu_error *error)
-{
-       return atomic_read(&error->reset_counter);
-}
-
-static inline bool __i915_reset_in_progress(u32 reset)
-{
-       return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
-}
-
-static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
-{
-       return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
-}
-
-static inline bool __i915_terminally_wedged(u32 reset)
-{
-       return unlikely(reset & I915_WEDGED);
-}
-
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-       return __i915_reset_in_progress(i915_reset_counter(error));
+       return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
 }
 
-static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-       return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
+       return unlikely(test_bit(I915_WEDGED, &error->flags));
 }
 
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
 {
-       return __i915_terminally_wedged(i915_reset_counter(error));
+       return i915_reset_in_progress(error) | i915_terminally_wedged(error);
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-       return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
+       return READ_ONCE(error->reset_count);
 }
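The helpers above imply a reshaped struct i915_gpu_error: the packed atomic reset_counter gives way to discrete flag bits plus a plain counter. A hedged sketch of the assumed fields (the exact bit positions are a guess):

	struct i915_gpu_error {
		/* ... */
		unsigned long flags;
	#define I915_RESET_IN_PROGRESS	0
	#define I915_WEDGED		(BITS_PER_LONG - 1)
		u32 reset_count;	/* completed resets, read via READ_ONCE() */
		/* ... */
	};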
 
-void i915_gem_reset(struct drm_device *dev);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                                       bool interruptible);
+                                       unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
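The interruptible bool becomes a flags word across these prototypes; a hedged guess at the flag bits implied by the call sites below (presumably defined in i915_gem_request.h):

	#define I915_WAIT_INTERRUPTIBLE	BIT(0)
	#define I915_WAIT_LOCKED	BIT(1)	/* caller holds struct_mutex */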
@@ -3388,7 +3394,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
-void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
@@ -3507,13 +3512,13 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
 void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
 int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_device *dev);
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
 #else
 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 { return 0; }
-static inline void intel_display_crc_init(struct drm_device *dev) {}
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
 #endif
 
 /* i915_gpu_error.c */
@@ -3557,8 +3562,8 @@ extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
 /* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_device *dev_priv);
-void i915_teardown_sysfs(struct drm_device *dev_priv);
+void i915_setup_sysfs(struct drm_i915_private *dev_priv);
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
 
 /* intel_i2c.c */
 extern int intel_setup_gmbus(struct drm_device *dev);
@@ -3735,9 +3740,16 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
  * will be implemented using 2 32-bit writes in an arbitrary order with
  * an arbitrary delay between them. This can cause the hardware to
  * act upon the intermediate value, possibly leading to corruption and
- * machine death. You have been warned.
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * dev_priv->uncore.funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
  */
-#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
 #define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
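The I915_READ64_2x32() helper just below (its body is elided by the diff context) is the safe pattern for such split registers; the idea, sketched:

	/* Re-read the high word until it is stable across the low-word read,
	 * so a carry between the two 32-bit reads cannot yield a torn value.
	 */
	u32 upper, lower, old_upper, loop = 0;
	u64 value;

	upper = I915_READ(upper_reg);
	do {
		old_upper = upper;
		lower = I915_READ(lower_reg);
		upper = I915_READ(upper_reg);
	} while (upper != old_upper && loop++ < 2);
	value = ((u64)upper << 32) | lower;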
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({                      \
@@ -3780,7 +3792,7 @@ __raw_write(64, q)
 #undef __raw_write
 
 /* These are untraced mmio-accessors that are only valid to be used inside
- * criticial sections inside IRQ handlers where forcewake is explicitly
+ * critical sections inside IRQ handlers where forcewake is explicitly
  * controlled.
  * Think twice, and think again, before using these.
  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
@@ -3852,7 +3864,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
                            schedule_timeout_uninterruptible(remaining_jiffies);
        }
 }
-static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
+
+static inline bool
+__i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
 
@@ -3914,17 +3928,6 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
                        return true;
        }
 
-       /* We need to check whether any gpu reset happened in between
-        * the request being submitted and now. If a reset has occurred,
-        * the seqno will have been advance past ours and our request
-        * is complete. If we are in the process of handling a reset,
-        * the request is effectively complete as the rendering will
-        * be discarded, but we need to return in order to drop the
-        * struct_mutex.
-        */
-       if (i915_reset_in_progress(&req->i915->gpu_error))
-               return true;
-
        return false;
 }
 
index 04607d4..c8bd022 100644 (file)
@@ -386,7 +386,8 @@ __unsafe_wait_rendering(struct drm_i915_gem_object *obj,
                int ret;
 
                ret = i915_gem_active_wait_unlocked(&active[idx],
-                                                   true, NULL, rps);
+                                                   I915_WAIT_INTERRUPTIBLE,
+                                                   NULL, rps);
                if (ret)
                        return ret;
        }
@@ -1679,6 +1680,56 @@ static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
        return size >> PAGE_SHIFT;
 }
 
+/**
+ * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
+ *
+ * A history of the GTT mmap interface:
+ *
+ * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
+ *     aligned and suitable for fencing, and still fit into the available
+ *     mappable space left by the pinned display objects. A classic problem
+ *     we called the page-fault-of-doom where we would ping-pong between
+ *     two objects that could not fit inside the GTT and so the memcpy
+ *     would page one object in at the expense of the other between every
+ *     single byte.
+ *
+ * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
+ *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
+ *     object is too large for the available space (or simply too large
+ *     for the mappable aperture!), a view is created instead and faulted
+ *     into userspace. (This view is aligned and sized appropriately for
+ *     fenced access.)
+ *
+ * Restrictions:
+ *
+ *  * snoopable objects cannot be accessed via the GTT. Doing so can cause machine
+ *    hangs on some architectures, corruption on others. An attempt to service
+ *    a GTT page fault from a snoopable object will generate a SIGBUS.
+ *
+ *  * the object must be able to fit into RAM (physical memory, though not
+ *    limited to the mappable aperture).
+ *
+ * Caveats:
+ *
+ *  * a new GTT page fault will synchronize rendering from the GPU and flush
+ *    all data to system memory. Subsequent access will not be synchronized.
+ *
+ *  * all mappings are revoked on runtime device suspend.
+ *
+ *  * there are only 8, 16 or 32 fence registers to share between all users
+ *    (older machines require a fence register for display and blitter access
+ *    as well). Contention of the fence registers will cause the previous users
+ *    to be unmapped and any new access will generate new page faults.
+ *
+ *  * running out of memory while servicing a fault may generate a SIGBUS,
+ *    rather than the expected SIGSEGV.
+ */
+int i915_gem_mmap_gtt_version(void)
+{
+       return 1;
+}
+
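Userspace can feature-detect this report; a hedged sketch assuming the I915_PARAM_MMAP_GTT_VERSION getparam that this series pairs with:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Returns the GTT mmap version, treating older kernels as version 0. */
	static int gtt_mmap_version(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_MMAP_GTT_VERSION; /* assumed param name */
		gp.value = &value;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;
		return value;
	}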
 /**
  * i915_gem_fault - fault a page into the GTT
  * @area: CPU VMA in question
@@ -1694,6 +1745,9 @@ static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
  * from the GTT and/or fence registers to make room.  So performance may
  * suffer if the GTT working set is large or there are few fence registers
  * left.
+ *
+ * The current feature set supported by i915_gem_fault() and thus GTT mmaps
+ * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
@@ -1973,7 +2027,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
         * to claim that space for ourselves, we need to take the big
         * struct_mutex to free the requests+objects and allocate our slot.
         */
-       err = i915_gem_wait_for_idle(dev_priv, true);
+       err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
        if (err)
                return err;
 
@@ -2495,32 +2549,94 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
                if (i915_gem_request_completed(request))
                        continue;
 
+               if (!i915_sw_fence_done(&request->submit))
+                       break;
+
                return request;
        }
 
        return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
+static void reset_request(struct drm_i915_gem_request *request)
+{
+       void *vaddr = request->ring->vaddr;
+       u32 head;
+
+       /* As this request likely depends on state from the lost
+        * context, clear out all the user operations leaving the
+        * breadcrumb at the end (so we get the fence notifications).
+        */
+       head = request->head;
+       if (request->postfix < head) {
+               memset(vaddr + head, 0, request->ring->size - head);
+               head = 0;
+       }
+       memset(vaddr + head, 0, request->postfix - head);
+}
+
+static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
+       struct i915_gem_context *incomplete_ctx;
        bool ring_hung;
 
+       /* Ensure irq handler finishes, and is not run again. */
+       tasklet_kill(&engine->irq_tasklet);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
+
        request = i915_gem_find_active_request(engine);
-       if (request == NULL)
+       if (!request)
                return;
 
        ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
-
        i915_set_reset_status(request->ctx, ring_hung);
+       if (!ring_hung)
+               return;
+
+       DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+                        engine->name, request->fence.seqno);
+
+       /* Setup the CS to resume from the breadcrumb of the hung request */
+       engine->reset_hw(engine, request);
+
+       /* Users of the default context do not rely on logical state
+        * preserved between batches. They have to emit full state on
+        * every batch and so it is safe to execute queued requests following
+        * the hang.
+        *
+        * Other contexts preserve state, now corrupt. We want to skip all
+        * queued requests that reference the corrupt context.
+        */
+       incomplete_ctx = request->ctx;
+       if (i915_gem_context_is_default(incomplete_ctx))
+               return;
+
        list_for_each_entry_continue(request, &engine->request_list, link)
-               i915_set_reset_status(request->ctx, false);
+               if (request->ctx == incomplete_ctx)
+                       reset_request(request);
 }
 
-static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_gem_request *request;
-       struct intel_ring *ring;
+       struct intel_engine_cs *engine;
+
+       i915_gem_retire_requests(dev_priv);
+
+       for_each_engine(engine, dev_priv)
+               i915_gem_reset_engine(engine);
+
+       i915_gem_restore_fences(&dev_priv->drm);
+}
+
+static void nop_submit_request(struct drm_i915_gem_request *request)
+{
+}
+
+static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
+{
+       engine->submit_request = nop_submit_request;
 
        /* Mark all pending requests as complete so that any concurrent
         * (lockless) lookup doesn't try and wait upon the request as we
@@ -2535,60 +2651,30 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
         */
 
        if (i915.enable_execlists) {
-               /* Ensure irq handler finishes or is cancelled. */
-               tasklet_kill(&engine->irq_tasklet);
-
-               intel_execlists_cancel_requests(engine);
-       }
-
-       /*
-        * We must free the requests after all the corresponding objects have
-        * been moved off active lists. Which is the same order as the normal
-        * retire_requests function does. This is important if object hold
-        * implicit references on things like e.g. ppgtt address spaces through
-        * the request.
-        */
-       request = i915_gem_active_raw(&engine->last_request,
-                                     &engine->i915->drm.struct_mutex);
-       if (request)
-               i915_gem_request_retire_upto(request);
-       GEM_BUG_ON(intel_engine_is_active(engine));
-
-       /* Having flushed all requests from all queues, we know that all
-        * ringbuffers must now be empty. However, since we do not reclaim
-        * all space when retiring the request (to prevent HEADs colliding
-        * with rapid ringbuffer wraparound) the amount of available space
-        * upon reset is less than when we start. Do one more pass over
-        * all the ringbuffers to reset last_retired_head.
-        */
-       list_for_each_entry(ring, &engine->buffers, link) {
-               ring->last_retired_head = ring->tail;
-               intel_ring_update_space(ring);
+               spin_lock(&engine->execlist_lock);
+               INIT_LIST_HEAD(&engine->execlist_queue);
+               i915_gem_request_put(engine->execlist_port[0].request);
+               i915_gem_request_put(engine->execlist_port[1].request);
+               memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+               spin_unlock(&engine->execlist_lock);
        }
 
        engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
-void i915_gem_reset(struct drm_device *dev)
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
-       /*
-        * Before we free the objects from the requests, we need to inspect
-        * them for finding the guilty party. As the requests only borrow
-        * their reference to the objects, the inspection must be done first.
-        */
-       for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_status(engine);
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
 
+       i915_gem_context_lost(dev_priv);
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_cleanup(engine);
+               i915_gem_cleanup_engine(engine);
        mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
-       i915_gem_context_reset(dev);
-
-       i915_gem_restore_fences(dev);
+       i915_gem_retire_requests(dev_priv);
 }
 
 static void
@@ -2721,7 +2807,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        active = __I915_BO_ACTIVE(obj);
        for_each_active(active, idx) {
                s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
-               ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
+               ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
+                                                   I915_WAIT_INTERRUPTIBLE,
                                                    timeout, rps);
                if (ret)
                        break;
@@ -2731,96 +2818,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        return ret;
 }
 
-static int
-__i915_gem_object_sync(struct drm_i915_gem_request *to,
-                      struct drm_i915_gem_request *from)
-{
-       int ret;
-
-       if (to->engine == from->engine)
-               return 0;
-
-       if (!i915.semaphores) {
-               ret = i915_wait_request(from,
-                                       from->i915->mm.interruptible,
-                                       NULL,
-                                       NO_WAITBOOST);
-               if (ret)
-                       return ret;
-       } else {
-               int idx = intel_engine_sync_index(from->engine, to->engine);
-               if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
-                       return 0;
-
-               trace_i915_gem_ring_sync_to(to, from);
-               ret = to->engine->semaphore.sync_to(to, from);
-               if (ret)
-                       return ret;
-
-               from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
-       }
-
-       return 0;
-}
-
-/**
- * i915_gem_object_sync - sync an object to a ring.
- *
- * @obj: object which may be in use on another ring.
- * @to: request we are wishing to use
- *
- * This code is meant to abstract object synchronization with the GPU.
- * Conceptually we serialise writes between engines inside the GPU.
- * We only allow one engine to write into a buffer at any time, but
- * multiple readers. To ensure each has a coherent view of memory, we must:
- *
- * - If there is an outstanding write request to the object, the new
- *   request must wait for it to complete (either CPU or in hw, requests
- *   on the same ring will be naturally ordered).
- *
- * - If we are a write request (pending_write_domain is set), the new
- *   request must wait for outstanding read requests to complete.
- *
- * Returns 0 if successful, else propagates up the lower layer error.
- */
-int
-i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct drm_i915_gem_request *to)
-{
-       struct i915_gem_active *active;
-       unsigned long active_mask;
-       int idx;
-
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-       active_mask = i915_gem_object_get_active(obj);
-       if (!active_mask)
-               return 0;
-
-       if (obj->base.pending_write_domain) {
-               active = obj->last_read;
-       } else {
-               active_mask = 1;
-               active = &obj->last_write;
-       }
-
-       for_each_active(active_mask, idx) {
-               struct drm_i915_gem_request *request;
-               int ret;
-
-               request = i915_gem_active_peek(&active[idx],
-                                              &obj->base.dev->struct_mutex);
-               if (!request)
-                       continue;
-
-               ret = __i915_gem_object_sync(to, request);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 static void __i915_vma_iounmap(struct i915_vma *vma)
 {
        GEM_BUG_ON(i915_vma_is_pinned(vma));
@@ -2924,7 +2921,7 @@ destroy:
 }
 
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                          bool interruptible)
+                          unsigned int flags)
 {
        struct intel_engine_cs *engine;
        int ret;
@@ -2933,7 +2930,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
                if (engine->last_context == NULL)
                        continue;
 
-               ret = intel_engine_idle(engine, interruptible);
+               ret = intel_engine_idle(engine, flags);
                if (ret)
                        return ret;
        }
@@ -3688,7 +3685,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (target == NULL)
                return 0;
 
-       ret = i915_wait_request(target, true, NULL, NULL);
+       ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
        i915_gem_request_put(target);
 
        return ret;
@@ -4244,7 +4241,9 @@ int i915_gem_suspend(struct drm_device *dev)
        if (ret)
                goto err;
 
-       ret = i915_gem_wait_for_idle(dev_priv, true);
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
        if (ret)
                goto err;
 
@@ -4280,8 +4279,7 @@ void i915_gem_resume(struct drm_device *dev)
         * guarantee that the context image is complete. So let's just reset
         * it and start again.
         */
-       if (i915.enable_execlists)
-               intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
+       dev_priv->gt.resume(dev_priv);
 
        mutex_unlock(&dev->struct_mutex);
 }
@@ -4433,8 +4431,10 @@ int i915_gem_init(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
 
        if (!i915.enable_execlists) {
+               dev_priv->gt.resume = intel_legacy_submission_resume;
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
        } else {
+               dev_priv->gt.resume = intel_lr_context_resume;
                dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
        }
 
@@ -4467,7 +4467,7 @@ int i915_gem_init(struct drm_device *dev)
                 * for all other failure, such as an allocation failure, bail.
                 */
                DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-               atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+               i915_gem_set_wedged(dev_priv);
                ret = 0;
        }
 
@@ -4569,6 +4569,8 @@ i915_gem_load_init(struct drm_device *dev)
 
        dev_priv->mm.interruptible = true;
 
+       atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+
        spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
@@ -4587,6 +4589,11 @@ void i915_gem_load_cleanup(struct drm_device *dev)
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj;
+       struct list_head *phases[] = {
+               &dev_priv->mm.unbound_list,
+               &dev_priv->mm.bound_list,
+               NULL
+       }, **p;
 
        /* Called just before we write the hibernation image.
         *
@@ -4597,16 +4604,18 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
         *
         * To make sure the hibernation image contains the latest state,
         * we update that state just before writing out the image.
+        *
+        * To try and reduce the hibernation image, we manually shrink
+        * the objects as well.
         */
 
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       }
+       i915_gem_shrink_all(dev_priv);
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       for (p = phases; *p; p++) {
+               list_for_each_entry(obj, *p, global_list) {
+                       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+                       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+               }
        }
 
        return 0;
index 35950ee..df10f4e 100644 (file)
@@ -420,22 +420,6 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
        }
 }
 
-void i915_gem_context_reset(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       lockdep_assert_held(&dev->struct_mutex);
-
-       if (i915.enable_execlists) {
-               struct i915_gem_context *ctx;
-
-               list_for_each_entry(ctx, &dev_priv->context_list, link)
-                       intel_lr_context_reset(dev_priv, ctx);
-       }
-
-       i915_gem_context_lost(dev_priv);
-}
-
 int i915_gem_context_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
index 815d5fb..5b6f81c 100644 (file)
@@ -170,7 +170,9 @@ search_again:
        if (ret)
                return ret;
 
-       ret = i915_gem_wait_for_idle(dev_priv, true);
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    I915_WAIT_INTERRUPTIBLE |
+                                    I915_WAIT_LOCKED);
        if (ret)
                return ret;
 
@@ -275,7 +277,9 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
                                return ret;
                }
 
-               ret = i915_gem_wait_for_idle(dev_priv, true);
+               ret = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE |
+                                            I915_WAIT_LOCKED);
                if (ret)
                        return ret;
 
index 601156c..33c8522 100644 (file)
@@ -1131,13 +1131,25 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 
        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
+               struct reservation_object *resv;
 
                if (obj->flags & other_rings) {
-                       ret = i915_gem_object_sync(obj, req);
+                       ret = i915_gem_request_await_object
+                               (req, obj, obj->base.pending_write_domain);
                        if (ret)
                                return ret;
                }
 
+               resv = i915_gem_object_get_dmabuf_resv(obj);
+               if (resv) {
+                       ret = i915_sw_fence_await_reservation
+                               (&req->submit, resv, &i915_fence_ops,
+                                obj->base.pending_write_domain, 10*HZ,
+                                GFP_KERNEL | __GFP_NOWARN);
+                       if (ret < 0)
+                               return ret;
+               }
+
                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        i915_gem_clflush_object(obj, false);
        }
@@ -1253,12 +1265,9 @@ static struct i915_gem_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                          struct intel_engine_cs *engine, const u32 ctx_id)
 {
-       struct i915_gem_context *ctx = NULL;
+       struct i915_gem_context *ctx;
        struct i915_ctx_hang_stats *hs;
 
-       if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
-               return ERR_PTR(-EINVAL);
-
        ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
        if (IS_ERR(ctx))
                return ctx;
@@ -1538,13 +1547,9 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
        /* Check whether the file_priv has already selected one ring. */
-       if ((int)file_priv->bsd_engine < 0) {
-               /* If not, use the ping-pong mechanism to select one. */
-               mutex_lock(&dev_priv->drm.struct_mutex);
-               file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
-               dev_priv->mm.bsd_engine_dispatch_index ^= 1;
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-       }
+       if ((int)file_priv->bsd_engine < 0)
+               file_priv->bsd_engine = atomic_fetch_xor(1,
+                        &dev_priv->mm.bsd_engine_dispatch_index);
 
        return file_priv->bsd_engine;
 }
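The mutex round-trip collapses into a single atomic: atomic_fetch_xor(1, ...) returns the old index and flips it, so successive new clients still alternate between the two BSD rings, now without taking struct_mutex. A minimal sketch of the semantics:

	/* Sketch: the first caller reads 0 (leaving 1 behind), the next
	 * reads 1, and so on -- the old ping-pong, lock-free.
	 */
	static void bsd_pingpong_demo(void)
	{
		atomic_t idx = ATOMIC_INIT(0);
		int a = atomic_fetch_xor(1, &idx);	/* a == 0, idx now 1 */
		int b = atomic_fetch_xor(1, &idx);	/* b == 1, idx now 0 */

		(void)a;
		(void)b;
	}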
index b90fdce..0bb4232 100644 (file)
@@ -32,6 +32,8 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+
 /**
  * DOC: Global GTT views
  *
@@ -122,8 +124,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
        has_full_48bit_ppgtt =
                IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-       if (intel_vgpu_active(dev_priv))
-               has_full_ppgtt = false; /* emulation is too hard */
+       if (intel_vgpu_active(dev_priv)) {
+               /* emulation is too hard */
+               has_full_ppgtt = false;
+               has_full_48bit_ppgtt = false;
+       }
 
        if (!has_aliasing_ppgtt)
                return 0;
@@ -158,7 +163,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                return 0;
        }
 
-       if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+       if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
                return has_full_48bit_ppgtt ? 3 : 2;
        else
                return has_aliasing_ppgtt ? 1 : 0;
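For reference, the sanitized value stored back into i915.enable_ppgtt encodes four modes; the names below are illustrative, not from the source:

	enum {
		PPGTT_DISABLED   = 0,	/* no (usable) ppgtt */
		PPGTT_ALIASING   = 1,
		PPGTT_FULL       = 2,	/* per-context, 32-bit VA */
		PPGTT_FULL_48BIT = 3,
	};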
@@ -326,16 +331,16 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static int __setup_page_dma(struct drm_device *dev,
                            struct i915_page_dma *p, gfp_t flags)
 {
-       struct device *device = &dev->pdev->dev;
+       struct device *kdev = &dev->pdev->dev;
 
        p->page = alloc_page(flags);
        if (!p->page)
                return -ENOMEM;
 
-       p->daddr = dma_map_page(device,
+       p->daddr = dma_map_page(kdev,
                                p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(device, p->daddr)) {
+       if (dma_mapping_error(kdev, p->daddr)) {
                __free_page(p->page);
                return -EINVAL;
        }
@@ -345,15 +350,17 @@ static int __setup_page_dma(struct drm_device *dev,
 
 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-       return __setup_page_dma(dev, p, GFP_KERNEL);
+       return __setup_page_dma(dev, p, I915_GFP_DMA);
 }
 
 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
+       struct pci_dev *pdev = dev->pdev;
+
        if (WARN_ON(!p->page))
                return;
 
-       dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
        __free_page(p->page);
        memset(p, 0, sizeof(*p));
 }
@@ -407,33 +414,18 @@ static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
        fill_page_dma(dev, p, v);
 }
 
-static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+static int
+setup_scratch_page(struct drm_device *dev,
+                  struct i915_page_dma *scratch,
+                  gfp_t gfp)
 {
-       struct i915_page_scratch *sp;
-       int ret;
-
-       sp = kzalloc(sizeof(*sp), GFP_KERNEL);
-       if (sp == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
-       if (ret) {
-               kfree(sp);
-               return ERR_PTR(ret);
-       }
-
-       set_pages_uc(px_page(sp), 1);
-
-       return sp;
+       return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
 }
 
-static void free_scratch_page(struct drm_device *dev,
-                             struct i915_page_scratch *sp)
+static void cleanup_scratch_page(struct drm_device *dev,
+                                struct i915_page_dma *scratch)
 {
-       set_pages_wb(px_page(sp), 1);
-
-       cleanup_px(dev, sp);
-       kfree(sp);
+       cleanup_page_dma(dev, scratch);
 }
 
 static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -479,7 +471,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
 {
        gen8_pte_t scratch_pte;
 
-       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                      I915_CACHE_LLC, true);
 
        fill_px(vm->dev, pt, scratch_pte);
@@ -490,9 +482,9 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
 {
        gen6_pte_t scratch_pte;
 
-       WARN_ON(px_dma(vm->scratch_page) == 0);
+       WARN_ON(vm->scratch_page.daddr == 0);
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, true, 0);
 
        fill32_px(vm->dev, pt, scratch_pte);
@@ -776,7 +768,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                                   bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-       gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                                 I915_CACHE_LLC, use_scratch);
 
        if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -882,9 +874,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
        struct drm_device *dev = vm->dev;
        int ret;
 
-       vm->scratch_page = alloc_scratch_page(dev);
-       if (IS_ERR(vm->scratch_page))
-               return PTR_ERR(vm->scratch_page);
+       ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+       if (ret)
+               return ret;
 
        vm->scratch_pt = alloc_pt(dev);
        if (IS_ERR(vm->scratch_pt)) {
@@ -918,7 +910,7 @@ free_pd:
 free_pt:
        free_pt(dev, vm->scratch_pt);
 free_scratch_page:
-       free_scratch_page(dev, vm->scratch_page);
+       cleanup_scratch_page(dev, &vm->scratch_page);
 
        return ret;
 }
@@ -962,7 +954,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
                free_pdp(dev, vm->scratch_pdp);
        free_pd(dev, vm->scratch_pd);
        free_pt(dev, vm->scratch_pt);
-       free_scratch_page(dev, vm->scratch_page);
+       cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
@@ -1459,7 +1451,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        struct i915_address_space *vm = &ppgtt->base;
        uint64_t start = ppgtt->base.start;
        uint64_t length = ppgtt->base.total;
-       gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                                 I915_CACHE_LLC, true);
 
        if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -1576,7 +1568,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        uint32_t  pte, pde;
        uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, true, 0);
 
        gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
@@ -1801,7 +1793,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned first_pte = first_entry % GEN6_PTES;
        unsigned last_pte, i;
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, true, 0);
 
        while (num_entries) {
@@ -1947,14 +1939,15 @@ unwind_out:
 static int gen6_init_scratch(struct i915_address_space *vm)
 {
        struct drm_device *dev = vm->dev;
+       int ret;
 
-       vm->scratch_page = alloc_scratch_page(dev);
-       if (IS_ERR(vm->scratch_page))
-               return PTR_ERR(vm->scratch_page);
+       ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+       if (ret)
+               return ret;
 
        vm->scratch_pt = alloc_pt(dev);
        if (IS_ERR(vm->scratch_pt)) {
-               free_scratch_page(dev, vm->scratch_page);
+               cleanup_scratch_page(dev, &vm->scratch_page);
                return PTR_ERR(vm->scratch_pt);
        }
 
@@ -1968,7 +1961,7 @@ static void gen6_free_scratch(struct i915_address_space *vm)
        struct drm_device *dev = vm->dev;
 
        free_pt(dev, vm->scratch_pt);
-       free_scratch_page(dev, vm->scratch_page);
+       cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -2311,12 +2304,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 {
-#ifdef writeq
        writeq(pte, addr);
-#else
-       iowrite32((u32)pte, addr);
-       iowrite32(pte >> 32, addr + 4);
-#endif
 }
 
 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
@@ -2509,7 +2497,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                      I915_CACHE_LLC,
                                      use_scratch);
        for (i = 0; i < num_entries; i++)
@@ -2541,7 +2529,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, use_scratch, 0);
 
        for (i = 0; i < num_entries; i++)
@@ -2685,19 +2673,19 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       struct device *kdev = &dev_priv->drm.pdev->dev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, false)) {
+               if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }
 
-       dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+       dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
                     PCI_DMA_BIDIRECTIONAL);
 }
 
@@ -2894,8 +2882,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
        struct pci_dev *pdev = ggtt->base.dev->pdev;
-       struct i915_page_scratch *scratch_page;
        phys_addr_t phys_addr;
+       int ret;
 
        /* For Modern GENs the PTEs and register space are split in the BAR */
        phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
@@ -2916,16 +2904,16 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
                return -ENOMEM;
        }
 
-       scratch_page = alloc_scratch_page(ggtt->base.dev);
-       if (IS_ERR(scratch_page)) {
+       ret = setup_scratch_page(ggtt->base.dev,
+                                &ggtt->base.scratch_page,
+                                GFP_DMA32);
+       if (ret) {
                DRM_ERROR("Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
                iounmap(ggtt->gsm);
-               return PTR_ERR(scratch_page);
+               return ret;
        }
 
-       ggtt->base.scratch_page = scratch_page;
-
        return 0;
 }
 
@@ -3007,7 +2995,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 
        iounmap(ggtt->gsm);
-       free_scratch_page(vm->dev, vm->scratch_page);
+       cleanup_scratch_page(vm->dev, &vm->scratch_page);
 }
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3244,8 +3232,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
+       struct drm_i915_gem_object *obj, *on;
 
        i915_check_and_clear_faults(dev_priv);
 
@@ -3253,20 +3240,32 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
                               true);
 
-       /* Cache flush objects bound into GGTT and rebind them. */
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+       ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+
+       /* clflush objects bound into the GGTT and rebind them. */
+       list_for_each_entry_safe(obj, on,
+                                &dev_priv->mm.bound_list, global_list) {
+               bool ggtt_bound = false;
+               struct i915_vma *vma;
+
                list_for_each_entry(vma, &obj->vma_list, obj_link) {
                        if (vma->vm != &ggtt->base)
                                continue;
 
+                       if (!i915_vma_unbind(vma))
+                               continue;
+
                        WARN_ON(i915_vma_bind(vma, obj->cache_level,
                                              PIN_UPDATE));
+                       ggtt_bound = true;
                }
 
-               if (obj->pin_display)
+               if (ggtt_bound)
                        WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
        }
 
+       ggtt->base.closed = false;
+
        if (INTEL_INFO(dev)->gen >= 8) {
                if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
                        chv_setup_private_ppat(dev_priv);
index a9aec25..ec78be2 100644 (file)
@@ -312,10 +312,6 @@ struct i915_page_dma {
 #define px_page(px) (px_base(px)->page)
 #define px_dma(px) (px_base(px)->daddr)
 
-struct i915_page_scratch {
-       struct i915_page_dma base;
-};
-
 struct i915_page_table {
        struct i915_page_dma base;
 
@@ -361,7 +357,7 @@ struct i915_address_space {
 
        bool closed;
 
-       struct i915_page_scratch *scratch_page;
+       struct i915_page_dma scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
        struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
index 1a21532..40978bc 100644 (file)
@@ -233,16 +233,18 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
        } while (tmp != req);
 }
 
-static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
+static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 {
-       if (__i915_terminally_wedged(reset_counter))
+       struct i915_gpu_error *error = &dev_priv->gpu_error;
+
+       if (i915_terminally_wedged(error))
                return -EIO;
 
-       if (__i915_reset_in_progress(reset_counter)) {
+       if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these.
                 */
-               if (!interruptible)
+               if (!dev_priv->mm.interruptible)
                        return -EIO;
 
                return -EAGAIN;
@@ -258,7 +260,9 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine, true);
+               ret = intel_engine_idle(engine,
+                                       I915_WAIT_INTERRUPTIBLE |
+                                       I915_WAIT_LOCKED);
                if (ret)
                        return ret;
        }
@@ -314,6 +318,26 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
        return 0;
 }
 
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+       struct drm_i915_gem_request *request =
+               container_of(fence, typeof(*request), submit);
+
+       /* Will be called from irq-context when using foreign DMA fences */
+
+       switch (state) {
+       case FENCE_COMPLETE:
+               request->engine->submit_request(request);
+               break;
+
+       case FENCE_FREE:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
 /**
  * i915_gem_request_alloc - allocate a request structure
  *
@@ -331,7 +355,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        u32 seqno;
        int ret;
@@ -340,7 +363,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
         * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
         * and restart.
         */
-       ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+       ret = i915_gem_check_wedge(dev_priv);
        if (ret)
                return ERR_PTR(ret);
 
@@ -393,6 +416,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
                   engine->fence_context,
                   seqno);
 
+       i915_sw_fence_init(&req->submit, submit_notify);
+
        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
@@ -402,7 +427,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        req->previous_context = NULL;
        req->file_priv = NULL;
        req->batch = NULL;
-       req->elsp_submitted = 0;
 
        /*
         * Reserve space in the ring buffer for all the commands required to
@@ -436,6 +460,94 @@ err:
        return ERR_PTR(ret);
 }
 
+static int
+i915_gem_request_await_request(struct drm_i915_gem_request *to,
+                              struct drm_i915_gem_request *from)
+{
+       int idx, ret;
+
+       GEM_BUG_ON(to == from);
+
+       if (to->engine == from->engine)
+               return 0;
+
+       idx = intel_engine_sync_index(from->engine, to->engine);
+       if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+               return 0;
+
+       trace_i915_gem_ring_sync_to(to, from);
+       if (!i915.semaphores) {
+               if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+                       ret = i915_sw_fence_await_dma_fence(&to->submit,
+                                                           &from->fence, 0,
+                                                           GFP_KERNEL);
+                       if (ret < 0)
+                               return ret;
+               }
+       } else {
+               ret = to->engine->semaphore.sync_to(to, from);
+               if (ret)
+                       return ret;
+       }
+
+       from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+       return 0;
+}
+
+/**
+ * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ *
+ * @to: request we are wishing to use
+ * @obj: object which may be in use on another ring.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ *   request must wait for it to complete (either CPU or in hw, requests
+ *   on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ *   request must wait for outstanding read requests to complete.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+                             struct drm_i915_gem_object *obj,
+                             bool write)
+{
+       struct i915_gem_active *active;
+       unsigned long active_mask;
+       int idx;
+
+       if (write) {
+               active_mask = i915_gem_object_get_active(obj);
+               active = obj->last_read;
+       } else {
+               active_mask = 1;
+               active = &obj->last_write;
+       }
+
+       for_each_active(active_mask, idx) {
+               struct drm_i915_gem_request *request;
+               int ret;
+
+               request = i915_gem_active_peek(&active[idx],
+                                              &obj->base.dev->struct_mutex);
+               if (!request)
+                       continue;
+
+               ret = i915_gem_request_await_request(to, request);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
@@ -466,10 +578,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
+       struct drm_i915_gem_request *prev;
        u32 request_start;
        u32 reserved_tail;
        int ret;
 
+       trace_i915_gem_request_add(request);
+
        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
@@ -493,20 +608,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
                WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
        }
 
-       trace_i915_gem_request_add(request);
-
-       /* Seal the request and mark it as pending execution. Note that
-        * we may inspect this state, without holding any locks, during
-        * hangcheck. Hence we apply the barrier to ensure that we do not
-        * see a more recent value in the hws than we are tracking.
-        */
-       request->emitted_jiffies = jiffies;
-       request->previous_seqno = engine->last_submitted_seqno;
-       engine->last_submitted_seqno = request->fence.seqno;
-       i915_gem_active_set(&engine->last_request, request);
-       list_add_tail(&request->link, &engine->request_list);
-       list_add_tail(&request->ring_link, &ring->request_list);
-
        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
@@ -527,8 +628,40 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);
 
+       /* Seal the request and mark it as pending execution. Note that
+        * we may inspect this state, without holding any locks, during
+        * hangcheck. Hence we apply the barrier to ensure that we do not
+        * see a more recent value in the hws than we are tracking.
+        */
+
+       prev = i915_gem_active_raw(&engine->last_request,
+                                  &request->i915->drm.struct_mutex);
+       if (prev)
+               i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+                                            &request->submitq);
+
+       request->emitted_jiffies = jiffies;
+       request->previous_seqno = engine->last_submitted_seqno;
+       engine->last_submitted_seqno = request->fence.seqno;
+       i915_gem_active_set(&engine->last_request, request);
+       list_add_tail(&request->link, &engine->request_list);
+       list_add_tail(&request->ring_link, &ring->request_list);
+
        i915_gem_mark_busy(engine);
-       engine->submit_request(request);
+
+       local_bh_disable();
+       i915_sw_fence_commit(&request->submit);
+       local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+}
+
+static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       if (list_empty(&wait->task_list))
+               __add_wait_queue(q, wait);
+       spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
@@ -598,7 +731,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 /**
  * i915_wait_request - wait until execution of request has finished
  * @req: the request to wait upon
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: how to wait (combination of I915_WAIT_* flags)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  * @rps: client to charge for RPS boosting
  *
@@ -613,17 +746,22 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
  * errno with remaining time filled in timeout argument.
  */
 int i915_wait_request(struct drm_i915_gem_request *req,
-                     bool interruptible,
+                     unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
 {
-       int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
        unsigned long timeout_remain;
        int ret = 0;
 
        might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+       GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+                  !!(flags & I915_WAIT_LOCKED));
+#endif
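
The lockdep check above compares two truth values with the `!!` idiom: both sides are normalised to 0/1 so a multi-bit flags word can be compared against a boolean "lock held" report. A standalone illustration (all values hypothetical):

    #include <assert.h>
    #include <stdio.h>

    #define WAIT_LOCKED (1u << 1)   /* mirrors I915_WAIT_LOCKED above */

    int main(void)
    {
            unsigned int flags = WAIT_LOCKED;
            int lock_held = 42;     /* any non-zero "held" report */

            /* Without !!, 42 != 2 would falsely trip the check. */
            assert(!!lock_held == !!(flags & WAIT_LOCKED));
            printf("lock state and WAIT_LOCKED flag agree\n");
            return 0;
    }
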
 
        if (i915_gem_request_completed(req))
                return 0;
@@ -666,7 +804,8 @@ int i915_wait_request(struct drm_i915_gem_request *req,
                goto complete;
 
        set_current_state(state);
-       add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+       if (flags & I915_WAIT_LOCKED)
+               add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
        intel_wait_init(&wait, req->fence.seqno);
        if (intel_engine_add_wait(req->engine, &wait))
@@ -702,14 +841,35 @@ wakeup:
                if (__i915_request_irq_complete(req))
                        break;
 
+               /* If the GPU is hung, and we hold the lock, reset the GPU
+                * and then check for completion. On a full reset, the engine's
+                * HW seqno will be advanced past us and we are complete.
+                * If we do a partial reset, we have to wait for the GPU to
+                * resume and update the breadcrumb.
+                *
+                * If we don't hold the mutex, we can just wait for the worker
+                * to come along and update the breadcrumb (either directly
+                * itself, or indirectly by recovering the GPU).
+                */
+               if (flags & I915_WAIT_LOCKED &&
+                   i915_reset_in_progress(&req->i915->gpu_error)) {
+                       __set_current_state(TASK_RUNNING);
+                       i915_reset(req->i915);
+                       reset_wait_queue(&req->i915->gpu_error.wait_queue,
+                                        &reset);
+                       continue;
+               }
+
                /* Only spin if we know the GPU is processing this request */
                if (i915_spin_request(req, state, 2))
                        break;
        }
-       remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
        intel_engine_remove_wait(req->engine, &wait);
+       if (flags & I915_WAIT_LOCKED)
+               remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
        __set_current_state(TASK_RUNNING);
+
 complete:
        trace_i915_gem_request_wait_end(req);
 
@@ -749,21 +909,24 @@ complete:
        return ret;
 }
 
-static void engine_retire_requests(struct intel_engine_cs *engine)
+static bool engine_retire_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request, *next;
 
        list_for_each_entry_safe(request, next, &engine->request_list, link) {
                if (!i915_gem_request_completed(request))
-                       break;
+                       return false;
 
                i915_gem_request_retire(request);
        }
+
+       return true;
 }
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
+       unsigned int tmp;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
@@ -772,11 +935,9 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 
        GEM_BUG_ON(!dev_priv->gt.awake);
 
-       for_each_engine(engine, dev_priv) {
-               engine_retire_requests(engine);
-               if (!intel_engine_is_active(engine))
+       for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
+               if (engine_retire_requests(engine))
                        dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-       }
 
        if (dev_priv->gt.active_engines == 0)
                queue_delayed_work(dev_priv->wq,
index 6c72bd8..974bd7b 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/fence.h>
 
 #include "i915_gem.h"
+#include "i915_sw_fence.h"
 
 struct intel_wait {
        struct rb_node node;
@@ -82,26 +83,32 @@ struct drm_i915_gem_request {
        struct intel_ring *ring;
        struct intel_signal_node signaling;
 
+       struct i915_sw_fence submit;
+       wait_queue_t submitq;
+
        /** GEM sequence number associated with the previous request,
         * when the HWS breadcrumb is equal to this the GPU is processing
         * this request.
         */
        u32 previous_seqno;
 
-       /** Position in the ringbuffer of the start of the request */
+       /** Position in the ring of the start of the request */
        u32 head;
 
        /**
-        * Position in the ringbuffer of the start of the postfix.
-        * This is required to calculate the maximum available ringbuffer
-        * space without overwriting the postfix.
+        * Position in the ring of the start of the postfix.
+        * This is required to calculate the maximum available ring space
+        * without overwriting the postfix.
         */
        u32 postfix;
 
-       /** Position in the ringbuffer of the end of the whole request */
+       /** Position in the ring of the end of the whole request */
        u32 tail;
 
-       /** Preallocate space in the ringbuffer for the emitting the request */
+       /** Position in the ring of the end of any workarounds after the tail */
+       u32 wa_tail;
+
+       /** Preallocate space in the ring for emitting the request */
        u32 reserved_space;
 
        /**
@@ -134,27 +141,8 @@ struct drm_i915_gem_request {
        /** file_priv list entry for this request */
        struct list_head client_list;
 
-       /**
-        * The ELSP only accepts two elements at a time, so we queue
-        * context/tail pairs on a given queue (ring->execlist_queue) until the
-        * hardware is available. The queue serves a double purpose: we also use
-        * it to keep track of the up to 2 contexts currently in the hardware
-        * (usually one in execution and the other queued up by the GPU): We
-        * only remove elements from the head of the queue when the hardware
-        * informs us that an element has been completed.
-        *
-        * All accesses to the queue are mediated by a spinlock
-        * (ring->execlist_lock).
-        */
-
-       /** Execlist link in the submission queue.*/
+       /** Link in the execlist submission queue, guarded by execlist_lock. */
        struct list_head execlist_link;
-
-       /** Execlists no. of times this request has been sent to the ELSP */
-       int elsp_submitted;
-
-       /** Execlists context hardware id. */
-       unsigned int ctx_hw_id;
 };
 
 extern const struct fence_ops i915_fence_ops;
@@ -222,6 +210,11 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
        *pdst = src;
 }
 
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+                             struct drm_i915_gem_object *obj,
+                             bool write);
+
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
        __i915_add_request(req, true)
@@ -234,10 +227,12 @@ struct intel_rps_client;
 #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
 
 int i915_wait_request(struct drm_i915_gem_request *req,
-                     bool interruptible,
+                     unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
        __attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE        BIT(0)
+#define I915_WAIT_LOCKED       BIT(1) /* struct_mutex held, handle GPU reset */
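
The single `bool interruptible` has become a flags word, so callers can independently say how to wait and whether struct_mutex is held. A compilable sketch of composing and testing the two bits, with BIT() defined as in the kernel:

    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define WAIT_INTERRUPTIBLE BIT(0)       /* mirrors I915_WAIT_INTERRUPTIBLE */
    #define WAIT_LOCKED        BIT(1)       /* mirrors I915_WAIT_LOCKED */

    int main(void)
    {
            /* Old: i915_wait_request(req, true, ...) under the mutex.
             * New equivalent: both bits set. */
            unsigned int flags = WAIT_INTERRUPTIBLE | WAIT_LOCKED;

            printf("interruptible=%d locked=%d\n",
                   !!(flags & WAIT_INTERRUPTIBLE), !!(flags & WAIT_LOCKED));
            return 0;
    }
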
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
@@ -472,6 +467,19 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
                if (!request || i915_gem_request_completed(request))
                        return NULL;
 
+               /* An especially silly compiler could decide to recompute the
+                * result of i915_gem_request_completed, more specifically
+                * re-emit the load for request->fence.seqno. A race would catch
+                * a later seqno value, which could flip the result from true to
+                * false. Which means part of the instructions below might not
+                * be executed, while later on instructions are executed. Due to
+                * barriers within the refcounting the inconsistency can't reach
+                * past the call to i915_gem_request_get_rcu, but not executing
+                * that while still executing i915_gem_request_put() creates
+                * havoc enough.  Prevent this with a compiler barrier.
+                */
+               barrier();
+
                request = i915_gem_request_get_rcu(request);
 
                /* What stops the following rcu_access_pointer() from occurring
@@ -578,13 +586,15 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
        if (!request)
                return 0;
 
-       return i915_wait_request(request, true, NULL, NULL);
+       return i915_wait_request(request,
+                                I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                                NULL, NULL);
 }
 
 /**
  * i915_gem_active_wait_unlocked - waits until the request is completed
  * @active - the active request on which to wait
- * @interruptible - whether the wait can be woken by a userspace signal
+ * @flags - how to wait (combination of I915_WAIT_* flags)
  * @timeout - how long to wait at most
  * @rps - userspace client to charge for a waitboost
  *
@@ -605,7 +615,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
  */
 static inline int
 i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
-                             bool interruptible,
+                             unsigned int flags,
                              s64 *timeout,
                              struct intel_rps_client *rps)
 {
@@ -614,7 +624,7 @@ i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
 
        request = i915_gem_active_get_unlocked(active);
        if (request) {
-               ret = i915_wait_request(request, interruptible, timeout, rps);
+               ret = i915_wait_request(request, flags, timeout, rps);
                i915_gem_request_put(request);
        }
 
@@ -641,7 +651,9 @@ i915_gem_active_retire(struct i915_gem_active *active,
        if (!request)
                return 0;
 
-       ret = i915_wait_request(request, true, NULL, NULL);
+       ret = i915_wait_request(request,
+                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                               NULL, NULL);
        if (ret)
                return ret;
 
index b80802b..1c237d0 100644 (file)
@@ -323,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
        unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
        do {
-               if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
+               if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
                    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
                        break;
 
@@ -414,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(dev_priv, false);
+       ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
        if (ret)
                goto out;
 
index aa050fa..59989e8 100644 (file)
@@ -92,6 +92,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct resource *r;
        u32 base;
@@ -111,7 +112,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen >= 3) {
                u32 bsm;
 
-               pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
+               pci_read_config_dword(pdev, INTEL_BSM, &bsm);
 
                base = bsm & INTEL_BSM_MASK;
        } else if (IS_I865G(dev)) {
@@ -119,7 +120,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                u16 toud = 0;
                u8 tmp;
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);
 
                if (tmp & TSEG_ENABLE) {
@@ -133,7 +134,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                        }
                }
 
-               pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
                                         I865_TOUD, &toud);
 
                base = (toud << 16) + tseg_size;
@@ -142,13 +143,13 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                u32 tom;
                u8 tmp;
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I85X_ESMRAMC, &tmp);
 
                if (tmp & TSEG_ENABLE)
                        tseg_size = MB(1);
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
                                         I85X_DRB3, &tmp);
                tom = tmp * MB(32);
 
@@ -158,7 +159,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                u32 tom;
                u8 tmp;
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I845_ESMRAMC, &tmp);
 
                if (tmp & TSEG_ENABLE) {
@@ -172,7 +173,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                        }
                }
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);
 
@@ -182,7 +183,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                u32 tom;
                u8 tmp;
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_ESMRAMC, &tmp);
 
                if (tmp & TSEG_ENABLE) {
@@ -192,7 +193,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
                                tseg_size = KB(512);
                }
 
-               pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+               pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
                                         I830_DRB3, &tmp);
                tom = tmp * MB(32);
 
index be54825..e537930 100644 (file)
@@ -68,7 +68,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 
        for_each_active(active, idx)
                i915_gem_active_wait_unlocked(&obj->last_read[idx],
-                                             false, NULL, NULL);
+                                             0, NULL, NULL);
 }
 
 static void cancel_userptr(struct work_struct *work)
index 41ec7a1..334f15d 100644 (file)
@@ -336,6 +336,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 {
        struct drm_device *dev = error_priv->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_i915_error_state *error = error_priv->error;
        struct drm_i915_error_object *obj;
        int i, j, offset, elt;
@@ -367,11 +368,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        }
        err_printf(m, "Reset count: %u\n", error->reset_count);
        err_printf(m, "Suspend count: %u\n", error->suspend_count);
-       err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
-       err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+       err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
+       err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
        err_printf(m, "PCI Subsystem: %04x:%04x\n",
-                  dev->pdev->subsystem_vendor,
-                  dev->pdev->subsystem_device);
+                  pdev->subsystem_vendor,
+                  pdev->subsystem_device);
        err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
        if (HAS_CSR(dev)) {
@@ -488,7 +489,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        }
                }
 
-               if (ee->num_waiters) {
+               if (IS_ERR(ee->waiters)) {
+                       err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
+                                  dev_priv->engine[i].name);
+               } else if (ee->num_waiters) {
                        err_printf(m, "%s --- %d waiters\n",
                                   dev_priv->engine[i].name,
                                   ee->num_waiters);
@@ -647,7 +651,8 @@ static void i915_error_state_free(struct kref *error_ref)
                i915_error_object_free(ee->wa_ctx);
 
                kfree(ee->requests);
-               kfree(ee->waiters);
+               if (!IS_ERR_OR_NULL(ee->waiters))
+                       kfree(ee->waiters);
        }
 
        i915_error_object_free(error->semaphore);
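
Storing ERR_PTR(-EDEADLK) in ee->waiters lets a single pointer field report either a valid waiter array or why capture was skipped, which is why the free path above must check IS_ERR_OR_NULL() first. A userspace model of the kernel's pointer-error encoding (the real helpers live in linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095          /* as in linux/err.h */

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
            /* errno values occupy the last page of the address space */
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *waiters = ERR_PTR(-35);   /* -EDEADLK, as stored above */

            if (IS_ERR(waiters))
                    printf("waiter capture skipped: %ld\n", PTR_ERR(waiters));
            return 0;
    }
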
@@ -932,7 +937,14 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
        ee->num_waiters = 0;
        ee->waiters = NULL;
 
-       spin_lock(&b->lock);
+       if (RB_EMPTY_ROOT(&b->waiters))
+               return;
+
+       if (!spin_trylock(&b->lock)) {
+               ee->waiters = ERR_PTR(-EDEADLK);
+               return;
+       }
+
        count = 0;
        for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
                count++;
@@ -946,9 +958,13 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
        if (!waiter)
                return;
 
-       ee->waiters = waiter;
+       if (!spin_trylock(&b->lock)) {
+               kfree(waiter);
+               ee->waiters = ERR_PTR(-EDEADLK);
+               return;
+       }
 
-       spin_lock(&b->lock);
+       ee->waiters = waiter;
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = container_of(rb, typeof(*w), node);
 
@@ -1009,7 +1025,7 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
        if (INTEL_GEN(dev_priv) > 2)
                ee->mode = I915_READ_MODE(engine);
 
-       if (I915_NEED_GFX_HWS(dev_priv)) {
+       if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
                i915_reg_t mmio;
 
                if (IS_GEN7(dev_priv)) {
index cf5a65b..a47e1e4 100644 (file)
 #define HOST2GUC_INTERRUPT             _MMIO(0xc4c8)
 #define   HOST2GUC_TRIGGER               (1<<0)
 
-#define DRBMISC1                       0x1984
-#define   DOORBELL_ENABLE                (1<<0)
-
 #define GEN8_DRBREGL(x)                        _MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID                 (1<<0)
 #define GEN8_DRBREGU(x)                        _MMIO(0x1000 + (x) * 8 + 4)
index e436941..43358e1 100644 (file)
@@ -59,7 +59,7 @@
  * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
  * represents an in-order queue. The kernel driver packs the ring tail pointer and an
  * ELSP context descriptor dword into a Work Item.
- * See guc_add_workqueue_item()
+ * See guc_wq_item_append()
  *
  */
 
@@ -114,10 +114,8 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
                if (ret != -ETIMEDOUT)
                        ret = -EIO;
 
-               DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
-                               "status=0x%08X response=0x%08X\n",
-                               data[0], ret, status,
-                               I915_READ(SOFT_SCRATCH(15)));
+               DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
+                        data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
 
                dev_priv->guc.action_fail += 1;
                dev_priv->guc.action_err = ret;
@@ -290,7 +288,7 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
-static void guc_init_proc_desc(struct intel_guc *guc,
+static void guc_proc_desc_init(struct intel_guc *guc,
                               struct i915_guc_client *client)
 {
        struct guc_process_desc *desc;
@@ -322,7 +320,7 @@ static void guc_init_proc_desc(struct intel_guc *guc,
  * write queue, etc).
  */
 
-static void guc_init_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_init(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -330,6 +328,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        struct i915_gem_context *ctx = client->owner;
        struct guc_context_desc desc;
        struct sg_table *sg;
+       unsigned int tmp;
        u32 gfx_addr;
 
        memset(&desc, 0, sizeof(desc));
@@ -339,7 +338,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        desc.priority = client->priority;
        desc.db_id = client->doorbell_id;
 
-       for_each_engine_masked(engine, dev_priv, client->engines) {
+       for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
                struct intel_context *ce = &ctx->engine[engine->id];
                uint32_t guc_engine_id = engine->guc_id;
                struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
@@ -400,7 +399,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                             sizeof(desc) * client->ctx_index);
 }
 
-static void guc_fini_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_fini(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
        struct guc_context_desc desc;
@@ -414,7 +413,7 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 }
 
 /**
- * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
  * @request:   request associated with the commands
  *
  * Return:     0 if space is available
@@ -422,35 +421,39 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
  *
  * This function must be called (and must return 0) before a request
  * is submitted to the GuC via i915_guc_submit() below. Once a result
- * of 0 has been returned, it remains valid until (but only until)
- * the next call to submit().
+ * of 0 has been returned, it must be balanced by a corresponding
+ * call to submit().
  *
- * This precheck allows the caller to determine in advance that space
+ * Reservation allows the caller to determine in advance that space
  * will be available for the next submission before committing resources
  * to it, and helps avoid late failures with complicated recovery paths.
  */
-int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
+int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
 {
        const size_t wqi_size = sizeof(struct guc_wq_item);
        struct i915_guc_client *gc = request->i915->guc.execbuf_client;
-       struct guc_process_desc *desc;
+       struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
        u32 freespace;
+       int ret;
 
-       GEM_BUG_ON(gc == NULL);
-
-       desc = gc->client_base + gc->proc_desc_offset;
-
+       spin_lock(&gc->wq_lock);
        freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
-       if (likely(freespace >= wqi_size))
-               return 0;
-
-       gc->no_wq_space += 1;
+       freespace -= gc->wq_rsvd;
+       if (likely(freespace >= wqi_size)) {
+               gc->wq_rsvd += wqi_size;
+               ret = 0;
+       } else {
+               gc->no_wq_space++;
+               ret = -EAGAIN;
+       }
+       spin_unlock(&gc->wq_lock);
 
-       return -EAGAIN;
+       return ret;
 }
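
The reserve/append pairing above keeps a shadow count (wq_rsvd) of space promised but not yet consumed in the circular work queue, so reservation can fail early with -EAGAIN instead of submission failing late. A self-contained model, assuming CIRC_SPACE()-style accounting for a power-of-two ring; the sizes and item width below are illustrative:

    #include <assert.h>
    #include <stdio.h>

    /* Space the producer may use without catching the consumer; the same
     * shape as CIRC_SPACE(tail, head, size) from linux/circ_buf.h. */
    #define SPACE(tail, head, size) (((head) - (tail) - 1) & ((size) - 1))

    struct wq { unsigned int tail, head, size, rsvd; };

    static int wq_reserve(struct wq *q, unsigned int len)
    {
            if (SPACE(q->tail, q->head, q->size) - q->rsvd < len)
                    return -1;              /* -EAGAIN in the driver */
            q->rsvd += len;
            return 0;
    }

    static void wq_append(struct wq *q, unsigned int len)
    {
            assert(q->rsvd >= len);         /* every append balances a reserve */
            q->tail = (q->tail + len) & (q->size - 1);
            q->rsvd -= len;
    }

    int main(void)
    {
            struct wq q = { .size = 64 };   /* illustrative ring size */

            assert(wq_reserve(&q, 16) == 0);
            wq_append(&q, 16);
            printf("tail=%u rsvd=%u\n", q.tail, q.rsvd);
            return 0;
    }
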
 
-static void guc_add_workqueue_item(struct i915_guc_client *gc,
-                                  struct drm_i915_gem_request *rq)
+/* Construct a Work Item and append it to the GuC's Work Queue */
+static void guc_wq_item_append(struct i915_guc_client *gc,
+                              struct drm_i915_gem_request *rq)
 {
        /* wqi_len is in DWords, and does not include the one-word header */
        const size_t wqi_size = sizeof(struct guc_wq_item);
@@ -463,7 +466,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
 
        desc = gc->client_base + gc->proc_desc_offset;
 
-       /* Free space is guaranteed, see i915_guc_wq_check_space() above */
+       /* Free space is guaranteed, see i915_guc_wq_reserve() above */
        freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
        GEM_BUG_ON(freespace < wqi_size);
 
@@ -481,12 +484,14 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
         * workqueue buffer dw by dw.
         */
        BUILD_BUG_ON(wqi_size != 16);
+       GEM_BUG_ON(gc->wq_rsvd < wqi_size);
 
        /* postincrement WQ tail for next time */
        wq_off = gc->wq_tail;
+       GEM_BUG_ON(wq_off & (wqi_size - 1));
        gc->wq_tail += wqi_size;
        gc->wq_tail &= gc->wq_size - 1;
-       GEM_BUG_ON(wq_off & (wqi_size - 1));
+       gc->wq_rsvd -= wqi_size;
 
        /* WQ starts from the page after doorbell / process_desc */
        wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
@@ -551,8 +556,8 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
                if (db_ret.db_status == GUC_DOORBELL_DISABLED)
                        break;
 
-               DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
-                         db_cmp.cookie, db_ret.cookie);
+               DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
+                        db_cmp.cookie, db_ret.cookie);
 
                /* update the cookie to newly read cookie from GuC */
                db_cmp.cookie = db_ret.cookie;
@@ -571,14 +576,13 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * Return:     0 on success, otherwise an errno.
  *             (Note: nonzero really shouldn't happen!)
  *
- * The caller must have already called i915_guc_wq_check_space() above
- * with a result of 0 (success) since the last request submission. This
- * guarantees that there is space in the work queue for the new request,
- * so enqueuing the item cannot fail.
+ * The caller must have already called i915_guc_wq_reserve() above with
+ * a result of 0 (success), guaranteeing that there is space in the work
+ * queue for the new request, so enqueuing the item cannot fail.
  *
  * Bad Things Will Happen if the caller violates this protocol e.g. calls
- * submit() when check() says there's no space, or calls submit() multiple
- * times with no intervening check().
+ * submit() when _reserve() says there's no space, or calls _submit()
+ * a different number of times from (successful) calls to _reserve().
  *
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouldn't happen.
@@ -590,7 +594,8 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
        struct i915_guc_client *client = guc->execbuf_client;
        int b_ret;
 
-       guc_add_workqueue_item(client, rq);
+       spin_lock(&client->wq_lock);
+       guc_wq_item_append(client, rq);
        b_ret = guc_ring_doorbell(client);
 
        client->submissions[engine_id] += 1;
@@ -600,6 +605,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 
        guc->submissions[engine_id] += 1;
        guc->last_seqno[engine_id] = rq->fence.seqno;
+       spin_unlock(&client->wq_lock);
 }
 
 /*
@@ -680,7 +686,7 @@ guc_client_free(struct drm_i915_private *dev_priv,
        i915_vma_unpin_and_release(&client->vma);
 
        if (client->ctx_index != GUC_INVALID_CTX_ID) {
-               guc_fini_ctx_desc(guc, client);
+               guc_ctx_desc_fini(guc, client);
                ida_simple_remove(&guc->ctx_ids, client->ctx_index);
        }
 
@@ -733,8 +739,8 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
        /* Restore to original value */
        err = guc_update_doorbell_id(guc, client, db_id);
        if (err)
-               DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
-                       db_id, err);
+               DRM_WARN("Failed to restore doorbell to %d, err %d\n",
+                        db_id, err);
 
        /* Read back & verify all doorbell registers */
        for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
@@ -790,6 +796,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
        client->vma = vma;
        client->client_base = kmap(i915_vma_first_page(vma));
+
+       spin_lock_init(&client->wq_lock);
        client->wq_offset = GUC_DB_SIZE;
        client->wq_size = GUC_WQ_SIZE;
 
@@ -810,8 +818,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        else
                client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
-       guc_init_proc_desc(guc, client);
-       guc_init_ctx_desc(guc, client);
+       guc_proc_desc_init(guc, client);
+       guc_ctx_desc_init(guc, client);
        if (guc_init_doorbell(guc, client, db_id))
                goto err;
 
@@ -823,13 +831,11 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        return client;
 
 err:
-       DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
-
        guc_client_free(dev_priv, client);
        return NULL;
 }
 
-static void guc_create_log(struct intel_guc *guc)
+static void guc_log_create(struct intel_guc *guc)
 {
        struct i915_vma *vma;
        unsigned long offset;
@@ -869,7 +875,7 @@ static void guc_create_log(struct intel_guc *guc)
        guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
-static void init_guc_policies(struct guc_policies *policies)
+static void guc_policies_init(struct guc_policies *policies)
 {
        struct guc_policy *policy;
        u32 p, i;
@@ -891,7 +897,7 @@ static void init_guc_policies(struct guc_policies *policies)
        policies->is_valid = 1;
 }
 
-static void guc_create_ads(struct intel_guc *guc)
+static void guc_addon_create(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct i915_vma *vma;
@@ -934,7 +940,7 @@ static void guc_create_ads(struct intel_guc *guc)
 
        /* GuC scheduling policies */
        policies = (void *)ads + sizeof(struct guc_ads);
-       init_guc_policies(policies);
+       guc_policies_init(policies);
 
        ads->scheduler_policies =
                i915_ggtt_offset(vma) + sizeof(struct guc_ads);
@@ -965,9 +971,11 @@ static void guc_create_ads(struct intel_guc *guc)
  */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 {
+       const size_t ctxsize = sizeof(struct guc_context_desc);
+       const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
+       const size_t gemsize = round_up(poolsize, PAGE_SIZE);
        struct intel_guc *guc = &dev_priv->guc;
        struct i915_vma *vma;
-       u32 size;
 
        /* Wipe bitmap & delete client in case of reinitialisation */
        bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
@@ -979,15 +987,14 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
        if (guc->ctx_pool_vma)
                return 0; /* already allocated */
 
-       size = PAGE_ALIGN(GUC_MAX_GPU_CONTEXTS*sizeof(struct guc_context_desc));
-       vma = guc_allocate_vma(guc, size);
+       vma = guc_allocate_vma(guc, gemsize);
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
        guc->ctx_pool_vma = vma;
        ida_init(&guc->ctx_ids);
-       guc_create_log(guc);
-       guc_create_ads(guc);
+       guc_log_create(guc);
+       guc_addon_create(guc);
 
        return 0;
 }
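
The new gemsize computation makes the page rounding explicit: the context-descriptor pool is sized in bytes, then rounded up to whole pages for the GEM allocation. A sketch of the power-of-two rounding involved; the pool numbers here are illustrative, not the GuC's real limits:

    #include <stdio.h>

    /* Power-of-two round-up, as the kernel's round_up() does for sizes. */
    #define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define PAGE_SZ 4096u

    int main(void)
    {
            unsigned int ctxsize  = 90;             /* illustrative descriptor size */
            unsigned int poolsize = 1024 * ctxsize; /* illustrative context count  */

            printf("pool %u bytes -> %u bytes (%u pages)\n",
                   poolsize, ROUND_UP(poolsize, PAGE_SZ),
                   ROUND_UP(poolsize, PAGE_SZ) / PAGE_SZ);
            return 0;
    }
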
@@ -997,6 +1004,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
        struct intel_guc *guc = &dev_priv->guc;
        struct i915_guc_client *client;
        struct intel_engine_cs *engine;
+       struct drm_i915_gem_request *request;
 
        /* client for execbuf submission */
        client = guc_client_alloc(dev_priv,
@@ -1004,7 +1012,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
                                  GUC_CTX_PRIORITY_KMD_NORMAL,
                                  dev_priv->kernel_context);
        if (!client) {
-               DRM_ERROR("Failed to create execbuf guc_client\n");
+               DRM_ERROR("Failed to create normal GuC client!\n");
                return -ENOMEM;
        }
 
@@ -1013,9 +1021,17 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
        guc_init_doorbell_hw(guc);
 
        /* Take over from manual control of ELSP (execlists) */
-       for_each_engine(engine, dev_priv)
+       for_each_engine(engine, dev_priv) {
                engine->submit_request = i915_guc_submit;
 
+               /* Replay the current set of previously submitted requests */
+               list_for_each_entry(request, &engine->request_list, link) {
+                       client->wq_rsvd += sizeof(struct guc_wq_item);
+                       if (i915_sw_fence_done(&request->submit))
+                               i915_guc_submit(request);
+               }
+       }
+
        return 0;
 }
 
index ebb83d5..c128fdb 100644 (file)
@@ -371,7 +371,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
 
-       I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+       I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
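
The ~0 → ~0u change above avoids passing a signed -1 where an unsigned mask is expected; a two-line demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            /* ~0 is an int with value -1; ~0u is the all-ones unsigned mask. */
            printf("~0  as signed   = %d\n", ~0);
            printf("~0u as unsigned = %u\n", ~0u);
            return 0;
    }
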
@@ -2497,57 +2497,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-       int ret;
 
        kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
+       DRM_DEBUG_DRIVER("resetting chip\n");
+       kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
        /*
-        * Note that there's only one work item which does gpu resets, so we
-        * need not worry about concurrent gpu resets potentially incrementing
-        * error->reset_counter twice. We only need to take care of another
-        * racing irq/hangcheck declaring the gpu dead for a second time. A
-        * quick check for that is good enough: schedule_work ensures the
-        * correct ordering between hang detection and this work item, and since
-        * the reset in-progress bit is only ever set by code outside of this
-        * work we don't need to worry about any other races.
+        * In most cases it's guaranteed that we get here with an RPM
+        * reference held, for example because there is a pending GPU
+        * request that won't finish until the reset is done. This
+        * isn't the case at least when we get here by doing a
+        * simulated reset via debugfs, so get an RPM reference.
         */
-       if (i915_reset_in_progress(&dev_priv->gpu_error)) {
-               DRM_DEBUG_DRIVER("resetting chip\n");
-               kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
-
-               /*
-                * In most cases it's guaranteed that we get here with an RPM
-                * reference held, for example because there is a pending GPU
-                * request that won't finish until the reset is done. This
-                * isn't the case at least when we get here by doing a
-                * simulated reset via debugs, so get an RPM reference.
-                */
-               intel_runtime_pm_get(dev_priv);
-
-               intel_prepare_reset(dev_priv);
+       intel_runtime_pm_get(dev_priv);
+       intel_prepare_reset(dev_priv);
 
+       do {
                /*
                 * All state reset _must_ be completed before we update the
                 * reset counter, for otherwise waiters might miss the reset
                 * pending state and not properly drop locks, resulting in
                 * deadlocks with the reset work.
                 */
-               ret = i915_reset(dev_priv);
+               if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+                       i915_reset(dev_priv);
+                       mutex_unlock(&dev_priv->drm.struct_mutex);
+               }
 
-               intel_finish_reset(dev_priv);
+               /* We need to wait for anyone holding the lock to wake up */
+       } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+                                    I915_RESET_IN_PROGRESS,
+                                    TASK_UNINTERRUPTIBLE,
+                                    HZ));
 
-               intel_runtime_pm_put(dev_priv);
+       intel_finish_reset(dev_priv);
+       intel_runtime_pm_put(dev_priv);
 
-               if (ret == 0)
-                       kobject_uevent_env(kobj,
-                                          KOBJ_CHANGE, reset_done_event);
+       if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+               kobject_uevent_env(kobj,
+                                  KOBJ_CHANGE, reset_done_event);
 
-               /*
-                * Note: The wake_up also serves as a memory barrier so that
-                * waiters see the update value of the reset counter atomic_t.
-                */
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
-       }
+       /*
+        * Note: The wake_up also serves as a memory barrier so that
+        * waiters see the updated value of dev_priv->gpu_error.flags.
+        */
+       wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
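
The rewritten worker above no longer assumes it can take struct_mutex immediately: it try-locks, and if someone else owns the lock it waits (with a timeout) for the in-progress bit to clear, since a lock holder may perform the reset itself. A minimal single-threaded model of that loop shape; every name here is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    static bool reset_in_progress = true;   /* I915_RESET_IN_PROGRESS stand-in */
    static bool mutex_free;                 /* whether trylock would succeed  */

    static bool mutex_trylock_sketch(void) { return mutex_free; }

    int main(void)
    {
            int passes = 0;

            do {
                    if (mutex_trylock_sketch())
                            reset_in_progress = false; /* i915_reset() clears the bit */

                    /* stand-in for wait_on_bit_timeout(): eventually the
                     * lock holder drops the lock or clears the bit itself */
                    if (++passes == 3)
                            mutex_free = true;
            } while (reset_in_progress);

            printf("reset completed after %d passes\n", passes);
            return 0;
    }
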
@@ -2666,25 +2661,26 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
        i915_capture_error_state(dev_priv, engine_mask, error_msg);
        i915_report_and_clear_eir(dev_priv);
 
-       if (engine_mask) {
-               atomic_or(I915_RESET_IN_PROGRESS_FLAG,
-                               &dev_priv->gpu_error.reset_counter);
+       if (!engine_mask)
+               return;
+
+       if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+                            &dev_priv->gpu_error.flags))
+               return;
 
-               /*
-                * Wakeup waiting processes so that the reset function
-                * i915_reset_and_wakeup doesn't deadlock trying to grab
-                * various locks. By bumping the reset counter first, the woken
-                * processes will see a reset in progress and back off,
-                * releasing their locks and then wait for the reset completion.
-                * We must do this for _all_ gpu waiters that might hold locks
-                * that the reset work needs to acquire.
-                *
-                * Note: The wake_up serves as the required memory barrier to
-                * ensure that the waiters see the updated value of the reset
-                * counter atomic_t.
-                */
-               i915_error_wake_up(dev_priv);
-       }
+       /*
+        * Wakeup waiting processes so that the reset function
+        * i915_reset_and_wakeup doesn't deadlock trying to grab
+        * various locks. By setting the reset-in-progress flag first, the
+        * woken processes will see a reset in progress and back off,
+        * releasing their locks and then waiting for the reset completion.
+        * We must do this for _all_ gpu waiters that might hold locks
+        * that the reset work needs to acquire.
+        *
+        * Note: The wake_up also provides a memory barrier to ensure that the
+        * waiters see the updated value of the reset flags.
+        */
+       i915_error_wake_up(dev_priv);
 
        i915_reset_and_wakeup(dev_priv);
 }
@@ -2835,10 +2831,10 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                }
        }
 
-       DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-                 engine->id, ipehr, offset);
+       DRM_DEBUG_DRIVER("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+                        engine->id, ipehr, offset);
 
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static struct intel_engine_cs *
@@ -2926,6 +2922,9 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller == NULL)
                return -1;
 
+       if (IS_ERR(signaller))
+               return 0;
+
        /* Prevent pathological recursion due to driver bugs */
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
@@ -3079,6 +3078,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                bool busy = intel_engine_has_waiter(engine);
                u64 acthd;
                u32 seqno;
+               u32 submit;
 
                semaphore_clear_deadlocks(dev_priv);
 
@@ -3094,14 +3094,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
                acthd = intel_engine_get_active_head(engine);
                seqno = intel_engine_get_seqno(engine);
+               submit = READ_ONCE(engine->last_submitted_seqno);
 
                if (engine->hangcheck.seqno == seqno) {
-                       if (!intel_engine_is_active(engine)) {
+                       if (i915_seqno_passed(seqno, submit)) {
                                engine->hangcheck.action = HANGCHECK_IDLE;
-                               if (busy) {
-                                       /* Safeguard against driver failure */
-                                       engine->hangcheck.score += BUSY;
-                               }
                        } else {
                                /* We always increment the hangcheck score
                                 * if the engine is busy and still processing
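
The idle test now compares the engine's completed seqno against the last submitted one with i915_seqno_passed(), which must stay correct when the 32-bit seqno wraps. A standalone sketch of the usual signed-difference comparison, assuming that is what the driver's helper does:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* a has passed b iff (a - b) is non-negative in 32-bit signed space;
     * this stays correct across the u32 wrap. */
    static int seqno_passed(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) >= 0;
    }

    int main(void)
    {
            assert(seqno_passed(5, 3));
            assert(!seqno_passed(3, 5));
            assert(seqno_passed(2, 0xfffffffe));    /* comparison across the wrap */
            printf("seqno ordering holds across wrap\n");
            return 0;
    }
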
@@ -3167,6 +3164,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
        if (hung) {
                char msg[80];
+               unsigned int tmp;
                int len;
 
                /* If some rings hung but others were still busy, only
@@ -3176,7 +3174,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        hung &= ~stuck;
                len = scnprintf(msg, sizeof(msg),
                                "%s on ", stuck == hung ? "No progress" : "Hang");
-               for_each_engine_masked(engine, dev_priv, hung)
+               for_each_engine_masked(engine, dev_priv, hung, tmp)
                        len += scnprintf(msg + len, sizeof(msg) - len,
                                         "%s, ", engine->name);
                msg[len-2] = '\0';
@@ -4502,7 +4500,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
 
        if (INTEL_INFO(dev_priv)->gen >= 8)
-               dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+               dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
 
        INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
                          i915_hangcheck_elapsed);
index 2587b1b..687c768 100644 (file)
 #define CHV_COLORS \
        .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
 
+#define GEN2_FEATURES \
+       .gen = 2, .num_pipes = 1, \
+       .has_overlay = 1, .overlay_needs_physical = 1, \
+       .has_gmch_display = 1, \
+       .hws_needs_physical = 1, \
+       .ring_mask = RENDER_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i830_info = {
-       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN2_FEATURES,
+       .is_mobile = 1, .cursor_needs_physical = 1,
+       .num_pipes = 2, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_845g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN2_FEATURES,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-       .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+       GEN2_FEATURES,
+       .is_i85x = 1, .is_mobile = 1,
+       .num_pipes = 2, /* legal, last one wins */
        .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN2_FEATURES,
 };
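
The new GEN*_FEATURES macros lean on a C99 guarantee: when designated initializers repeat a member, the last one wins, so a device entry can pull in the macro and then override individual fields, as the "/* legal, last one wins */" notes say. A compilable illustration with a made-up struct:

    #include <stdio.h>

    struct info { int gen, num_pipes, is_mobile; };

    #define GEN2_SKETCH .gen = 2, .num_pipes = 1   /* stand-in for GEN2_FEATURES */

    static const struct info i830_sketch = {
            GEN2_SKETCH,
            .is_mobile = 1,
            .num_pipes = 2,         /* overrides the macro's 1; last one wins */
    };

    int main(void)
    {
            printf("gen=%d pipes=%d mobile=%d\n",
                   i830_sketch.gen, i830_sketch.num_pipes, i830_sketch.is_mobile);
            return 0;
    }
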
 
+#define GEN3_FEATURES \
+       .gen = 3, .num_pipes = 2, \
+       .has_gmch_display = 1, \
+       .ring_mask = RENDER_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i915g_info = {
-       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .is_i915g = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-       .gen = 3, .is_mobile = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .is_mobile = 1,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-       .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+       GEN3_FEATURES,
+       .is_i945gm = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 
+#define GEN4_FEATURES \
+       .gen = 4, .num_pipes = 2, \
+       .has_hotplug = 1, \
+       .has_gmch_display = 1, \
+       .ring_mask = RENDER_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i965g_info = {
-       .gen = 4, .is_broadwater = 1, .num_pipes = 2,
-       .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_broadwater = 1,
        .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-       .gen = 4, .is_crestline = 1, .num_pipes = 2,
-       .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_crestline = 1,
+       .is_mobile = 1, .has_fbc = 1,
        .has_overlay = 1,
        .supports_tv = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       .hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-       .gen = 3, .is_g33 = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       GEN3_FEATURES,
+       .is_g33 = 1,
+       .has_hotplug = 1,
        .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
-       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_g4x = 1,
+       .has_pipe_cxsr = 1,
        .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-       .gen = 4, .is_g4x = 1, .num_pipes = 2,
-       .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       GEN4_FEATURES,
+       .is_g4x = 1,
+       .is_mobile = 1, .has_fbc = 1,
+       .has_pipe_cxsr = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       GEN3_FEATURES,
+       .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+       .has_hotplug = 1,
        .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
 };
 
+#define GEN5_FEATURES \
+       .gen = 5, .num_pipes = 2, \
+       .has_hotplug = 1, \
+       .has_gmbus_irq = 1, \
+       .ring_mask = RENDER_RING | BSD_RING, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_ironlake_d_info = {
-       .gen = 5, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN5_FEATURES,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-       .gen = 5, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN5_FEATURES,
+       .is_mobile = 1,
 };
 
+#define GEN6_FEATURES \
+       .gen = 6, .num_pipes = 2, \
+       .has_hotplug = 1, \
+       .has_fbc = 1, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .has_llc = 1, \
+       .has_rc6 = 1, \
+       .has_rc6p = 1, \
+       .has_gmbus_irq = 1, \
+       .has_hw_contexts = 1, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
 static const struct intel_device_info intel_sandybridge_d_info = {
-       .gen = 6, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN6_FEATURES,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-       .gen = 6, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       GEN6_FEATURES,
+       .is_mobile = 1,
 };
 
 #define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1, \
+       .has_rc6 = 1, \
+       .has_rc6p = 1, \
+       .has_gmbus_irq = 1, \
+       .has_hw_contexts = 1, \
        GEN_DEFAULT_PIPEOFFSETS, \
        IVB_CURSOR_OFFSETS
 
 static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
+       .has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
+       .has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
+       .has_l3_dpf = 1,
 };
 
 #define VLV_FEATURES  \
        .gen = 7, .num_pipes = 2, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_psr = 1, \
+       .has_runtime_pm = 1, \
+       .has_rc6 = 1, \
+       .has_gmbus_irq = 1, \
+       .has_hw_contexts = 1, \
+       .has_gmch_display = 1, \
+       .has_hotplug = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .display_mmio_offset = VLV_DISPLAY_BASE, \
        GEN_DEFAULT_PIPEOFFSETS, \
        CURSOR_OFFSETS
 
-static const struct intel_device_info intel_valleyview_m_info = {
-       VLV_FEATURES,
-       .is_valleyview = 1,
-       .is_mobile = 1,
-};
-
-static const struct intel_device_info intel_valleyview_d_info = {
+static const struct intel_device_info intel_valleyview_info = {
        VLV_FEATURES,
        .is_valleyview = 1,
 };
@@ -264,54 +272,50 @@ static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
        .has_ddi = 1, \
-       .has_fpga_dbg = 1
-
-static const struct intel_device_info intel_haswell_d_info = {
-       HSW_FEATURES,
-       .is_haswell = 1,
-};
-
-static const struct intel_device_info intel_haswell_m_info = {
+       .has_fpga_dbg = 1, \
+       .has_psr = 1, \
+       .has_resource_streamer = 1, \
+       .has_dp_mst = 1, \
+       .has_rc6p = 0 /* RC6p removed by HSW */, \
+       .has_runtime_pm = 1
+
+static const struct intel_device_info intel_haswell_info = {
        HSW_FEATURES,
        .is_haswell = 1,
-       .is_mobile = 1,
+       .has_l3_dpf = 1,
 };
 
 #define BDW_FEATURES \
        HSW_FEATURES, \
-       BDW_COLORS
+       BDW_COLORS, \
+       .has_logical_ring_contexts = 1
 
-static const struct intel_device_info intel_broadwell_d_info = {
+static const struct intel_device_info intel_broadwell_info = {
        BDW_FEATURES,
        .gen = 8,
        .is_broadwell = 1,
 };
 
-static const struct intel_device_info intel_broadwell_m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-       .is_broadwell = 1,
-};
-
-static const struct intel_device_info intel_broadwell_gt3d_info = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
        BDW_FEATURES,
        .gen = 8,
        .is_broadwell = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
-static const struct intel_device_info intel_broadwell_gt3m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-       .is_broadwell = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
-
 static const struct intel_device_info intel_cherryview_info = {
        .gen = 8, .num_pipes = 3,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .is_cherryview = 1,
+       .has_psr = 1,
+       .has_runtime_pm = 1,
+       .has_resource_streamer = 1,
+       .has_rc6 = 1,
+       .has_gmbus_irq = 1,
+       .has_hw_contexts = 1,
+       .has_logical_ring_contexts = 1,
+       .has_gmch_display = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
@@ -322,25 +326,41 @@ static const struct intel_device_info intel_skylake_info = {
        BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
        BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broxton_info = {
        .is_broxton = 1,
        .gen = 9,
-       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .num_pipes = 3,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
+       .has_runtime_pm = 1,
        .has_pooled_eu = 0,
+       .has_csr = 1,
+       .has_resource_streamer = 1,
+       .has_rc6 = 1,
+       .has_dp_mst = 1,
+       .has_gmbus_irq = 1,
+       .has_hw_contexts = 1,
+       .has_logical_ring_contexts = 1,
+       .has_guc = 1,
+       .ddb_size = 512,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
        BDW_COLORS,
@@ -350,12 +370,18 @@ static const struct intel_device_info intel_kabylake_info = {
        BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
        BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
+       .has_csr = 1,
+       .has_guc = 1,
+       .ddb_size = 896,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
@@ -387,14 +413,10 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
-       INTEL_HSW_D_IDS(&intel_haswell_d_info),
-       INTEL_HSW_M_IDS(&intel_haswell_m_info),
-       INTEL_VLV_M_IDS(&intel_valleyview_m_info),
-       INTEL_VLV_D_IDS(&intel_valleyview_d_info),
-       INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
-       INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
-       INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
-       INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+       INTEL_HSW_IDS(&intel_haswell_info),
+       INTEL_VLV_IDS(&intel_valleyview_info),
+       INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+       INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
        INTEL_CHV_IDS(&intel_cherryview_info),
        INTEL_SKL_GT1_IDS(&intel_skylake_info),
        INTEL_SKL_GT2_IDS(&intel_skylake_info),
index d4adf28..70d9616 100644
@@ -7067,7 +7067,7 @@ enum {
 #define VLV_RCEDATA                            _MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD                   _MMIO(0xA0C0)
 #define GEN6_PMINTRMSK                         _MMIO(0xA168)
-#define   GEN8_PMINTR_REDIRECT_TO_NON_DISP     (1<<31)
+#define   GEN8_PMINTR_REDIRECT_TO_GUC            (1<<31)
 #define GEN8_MISC_CTRL0                                _MMIO(0xA180)
 #define VLV_PWRDWNUPCTL                                _MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS          _MMIO(0xA0C4)
@@ -7123,6 +7123,15 @@ enum {
 
 #define GEN6_PCODE_MAILBOX                     _MMIO(0x138124)
 #define   GEN6_PCODE_READY                     (1<<31)
+#define   GEN6_PCODE_ERROR_MASK                        0xFF
+#define     GEN6_PCODE_SUCCESS                 0x0
+#define     GEN6_PCODE_ILLEGAL_CMD             0x1
+#define     GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define     GEN6_PCODE_TIMEOUT                 0x3
+#define     GEN6_PCODE_UNIMPLEMENTED_CMD       0xFF
+#define     GEN7_PCODE_TIMEOUT                 0x2
+#define     GEN7_PCODE_ILLEGAL_DATA            0x3
+#define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
 #define          GEN6_PCODE_WRITE_RC6VIDS              0x4
 #define          GEN6_PCODE_READ_RC6VIDS               0x5
 #define     GEN6_ENCODE_RC6_VID(mv)            (((mv) - 245) / 5)
@@ -7144,6 +7153,10 @@ enum {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ          0x17
 #define   DISPLAY_IPS_CONTROL                  0x19
 #define          HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL  0x1A
+#define   GEN9_PCODE_SAGV_CONTROL              0x21
+#define     GEN9_SAGV_DISABLE                  0x0
+#define     GEN9_SAGV_IS_DISABLED              0x1
+#define     GEN9_SAGV_ENABLE                   0x3
 #define GEN6_PCODE_DATA                                _MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT     16
index 4f27277..a0af170 100644
@@ -63,6 +63,7 @@ static void i915_restore_display(struct drm_device *dev)
 int i915_save_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int i;
 
        mutex_lock(&dev->struct_mutex);
@@ -70,7 +71,7 @@ int i915_save_state(struct drm_device *dev)
        i915_save_display(dev);
 
        if (IS_GEN4(dev))
-               pci_read_config_word(dev->pdev, GCDGMBUS,
+               pci_read_config_word(pdev, GCDGMBUS,
                                     &dev_priv->regfile.saveGCDGMBUS);
 
        /* Cache mode state */
@@ -108,6 +109,7 @@ int i915_save_state(struct drm_device *dev)
 int i915_restore_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int i;
 
        mutex_lock(&dev->struct_mutex);
@@ -115,7 +117,7 @@ int i915_restore_state(struct drm_device *dev)
        i915_gem_restore_fences(dev);
 
        if (IS_GEN4(dev))
-               pci_write_config_word(dev->pdev, GCDGMBUS,
+               pci_write_config_word(pdev, GCDGMBUS,
                                      dev_priv->regfile.saveGCDGMBUS);
        i915_restore_display(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
new file mode 100644
index 0000000..1e5cbc5
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * (C) Copyright 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/slab.h>
+#include <linux/fence.h>
+#include <linux/reservation.h>
+
+#include "i915_sw_fence.h"
+
+static DEFINE_SPINLOCK(i915_sw_fence_lock);
+
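+/*
+ * The notify callback is stored in fence->flags: callbacks are declared
+ * __aligned(4), so the low two bits remain free for internal flag bits
+ * and masking with I915_SW_FENCE_MASK recovers the function pointer.
+ */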
+static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
+                                 enum i915_sw_fence_notify state)
+{
+       i915_sw_fence_notify_t fn;
+
+       fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
+       return fn(fence, state);
+}
+
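+/*
+ * Last reference dropped: fences with a notify callback hand cleanup to
+ * their owner via FENCE_FREE; bare fences are assumed to be kmalloc'ed
+ * and are freed here.
+ */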
+static void i915_sw_fence_free(struct kref *kref)
+{
+       struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
+
+       WARN_ON(atomic_read(&fence->pending) > 0);
+
+       if (fence->flags & I915_SW_FENCE_MASK)
+               __i915_sw_fence_notify(fence, FENCE_FREE);
+       else
+               kfree(fence);
+}
+
+static void i915_sw_fence_put(struct i915_sw_fence *fence)
+{
+       kref_put(&fence->kref, i915_sw_fence_free);
+}
+
+static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
+{
+       kref_get(&fence->kref);
+       return fence;
+}
+
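+/*
+ * Mark the fence as signaled (pending: 0 -> -1) and wake all waiters,
+ * either running their callbacks directly or parking them on
+ * @continuation for the caller to process.
+ */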
+static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
+                                       struct list_head *continuation)
+{
+       wait_queue_head_t *x = &fence->wait;
+       wait_queue_t *pos, *next;
+       unsigned long flags;
+
+       atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
+
+       /*
+        * To prevent unbounded recursion as we traverse the graph of
+        * i915_sw_fences, we move the task_list from this fence (the
+        * next ready fence) to the tail of the original fence's
+        * task_list, so its waiters are woken as part of the same pass.
+        */
+
+       spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
+       if (continuation) {
+               list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+                       if (pos->func == autoremove_wake_function)
+                               pos->func(pos, TASK_NORMAL, 0, continuation);
+                       else
+                               list_move_tail(&pos->task_list, continuation);
+               }
+       } else {
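+               /*
+                * Top-level wakeup: fences woken via pos->func() park
+                * their own waiters on &extra; splice them back onto the
+                * task_list and loop until no new waiters appear.
+                */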
+               LIST_HEAD(extra);
+
+               do {
+                       list_for_each_entry_safe(pos, next,
+                                                &x->task_list, task_list)
+                               pos->func(pos, TASK_NORMAL, 0, &extra);
+
+                       if (list_empty(&extra))
+                               break;
+
+                       list_splice_tail_init(&extra, &x->task_list);
+               } while (1);
+       }
+       spin_unlock_irqrestore(&x->lock, flags);
+}
+
+static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
+                                    struct list_head *continuation)
+{
+       if (!atomic_dec_and_test(&fence->pending))
+               return;
+
+       if (fence->flags & I915_SW_FENCE_MASK &&
+           __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
+               return;
+
+       __i915_sw_fence_wake_up_all(fence, continuation);
+}
+
+static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+{
+       if (WARN_ON(i915_sw_fence_done(fence)))
+               return;
+
+       __i915_sw_fence_complete(fence, NULL);
+}
+
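+/*
+ * Take an extra pending count for a new dependency; the fence must not
+ * already have completed.
+ */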
+static void i915_sw_fence_await(struct i915_sw_fence *fence)
+{
+       WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+}
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+{
+       BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+
+       init_waitqueue_head(&fence->wait);
+       kref_init(&fence->kref);
+       atomic_set(&fence->pending, 1);
+       fence->flags = (unsigned long)fn;
+}
+
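+/*
+ * Drop the initial pending count and reference taken in
+ * i915_sw_fence_init(); once all awaited dependencies have signaled,
+ * the fence completes and its waiters are woken.
+ */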
+void i915_sw_fence_commit(struct i915_sw_fence *fence)
+{
+       i915_sw_fence_complete(fence);
+       i915_sw_fence_put(fence);
+}
+
+static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+{
+       list_del(&wq->task_list);
+       __i915_sw_fence_complete(wq->private, key);
+       i915_sw_fence_put(wq->private);
+       return 0;
+}
+
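+/*
+ * Depth-first search through the waiters of @fence looking for
+ * @signaler; I915_SW_FENCE_CHECKED_BIT marks visited nodes so each
+ * fence in the DAG is inspected only once.
+ */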
+static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+                                   const struct i915_sw_fence * const signaler)
+{
+       wait_queue_t *wq;
+
+       if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+               return false;
+
+       if (fence == signaler)
+               return true;
+
+       list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+               if (wq->func != i915_sw_fence_wake)
+                       continue;
+
+               if (__i915_sw_fence_check_if_after(wq->private, signaler))
+                       return true;
+       }
+
+       return false;
+}
+
+static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
+{
+       wait_queue_t *wq;
+
+       if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+               return;
+
+       list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+               if (wq->func != i915_sw_fence_wake)
+                       continue;
+
+               __i915_sw_fence_clear_checked_bit(wq->private);
+       }
+}
+
+static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+                                 const struct i915_sw_fence * const signaler)
+{
+       unsigned long flags;
+       bool err;
+
+       if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
+               return false;
+
+       spin_lock_irqsave(&i915_sw_fence_lock, flags);
+       err = __i915_sw_fence_check_if_after(fence, signaler);
+       __i915_sw_fence_clear_checked_bit(fence);
+       spin_unlock_irqrestore(&i915_sw_fence_lock, flags);
+
+       return err;
+}
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+                                struct i915_sw_fence *signaler,
+                                wait_queue_t *wq)
+{
+       unsigned long flags;
+       int pending;
+
+       if (i915_sw_fence_done(signaler))
+               return 0;
+
+       /* The dependency graph must be acyclic. */
+       if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
+               return -EINVAL;
+
+       INIT_LIST_HEAD(&wq->task_list);
+       wq->flags = 0;
+       wq->func = i915_sw_fence_wake;
+       wq->private = i915_sw_fence_get(fence);
+
+       i915_sw_fence_await(fence);
+
+       spin_lock_irqsave(&signaler->wait.lock, flags);
+       if (likely(!i915_sw_fence_done(signaler))) {
+               __add_wait_queue_tail(&signaler->wait, wq);
+               pending = 1;
+       } else {
+               i915_sw_fence_wake(wq, 0, 0, NULL);
+               pending = 0;
+       }
+       spin_unlock_irqrestore(&signaler->wait.lock, flags);
+
+       return pending;
+}
+
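+/*
+ * State for an asynchronous wait on an external struct fence: the
+ * optional timer warns and completes our fence if the external fence
+ * does not signal within the given timeout.
+ */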
+struct dma_fence_cb {
+       struct fence_cb base;
+       struct i915_sw_fence *fence;
+       struct fence *dma;
+       struct timer_list timer;
+};
+
+static void timer_i915_sw_fence_wake(unsigned long data)
+{
+       struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+
+       printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
+              cb->dma->ops->get_driver_name(cb->dma),
+              cb->dma->ops->get_timeline_name(cb->dma),
+              cb->dma->seqno);
+       fence_put(cb->dma);
+       cb->dma = NULL;
+
+       i915_sw_fence_commit(cb->fence);
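+       /* Clearing the timer function tells dma_i915_sw_fence_wake() that
+        * the timeout path has already committed the fence.
+        */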
+       cb->timer.function = NULL;
+}
+
+static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+{
+       struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+       del_timer_sync(&cb->timer);
+       if (cb->timer.function)
+               i915_sw_fence_commit(cb->fence);
+       fence_put(cb->dma);
+
+       kfree(cb);
+}
+
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+                                 struct fence *dma,
+                                 unsigned long timeout,
+                                 gfp_t gfp)
+{
+       struct dma_fence_cb *cb;
+       int ret;
+
+       if (fence_is_signaled(dma))
+               return 0;
+
+       cb = kmalloc(sizeof(*cb), gfp);
+       if (!cb) {
+               if (!gfpflags_allow_blocking(gfp))
+                       return -ENOMEM;
+
+               return fence_wait(dma, false);
+       }
+
+       cb->fence = i915_sw_fence_get(fence);
+       i915_sw_fence_await(fence);
+
+       cb->dma = NULL;
+       __setup_timer(&cb->timer,
+                     timer_i915_sw_fence_wake, (unsigned long)cb,
+                     TIMER_IRQSAFE);
+       if (timeout) {
+               cb->dma = fence_get(dma);
+               mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
+       }
+
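+       /*
+        * fence_add_callback() returns 0 if the callback was installed
+        * (we are now waiting: report 1), or -ENOENT if @dma has already
+        * signaled (nothing to wait for: report 0).
+        */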
+       ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+       if (ret == 0) {
+               ret = 1;
+       } else {
+               dma_i915_sw_fence_wake(dma, &cb->base);
+               if (ret == -ENOENT) /* fence already signaled */
+                       ret = 0;
+       }
+
+       return ret;
+}
+
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+                                   struct reservation_object *resv,
+                                   const struct fence_ops *exclude,
+                                   bool write,
+                                   unsigned long timeout,
+                                   gfp_t gfp)
+{
+       struct fence *excl;
+       int ret = 0, pending;
+
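+       /*
+        * Writers must wait for every fence on the object (all shared
+        * reader fences plus the exclusive fence); readers only need to
+        * wait for the exclusive (writer) fence.
+        */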
+       if (write) {
+               struct fence **shared;
+               unsigned int count, i;
+
+               ret = reservation_object_get_fences_rcu(resv,
+                                                       &excl, &count, &shared);
+               if (ret)
+                       return ret;
+
+               for (i = 0; i < count; i++) {
+                       if (shared[i]->ops == exclude)
+                               continue;
+
+                       pending = i915_sw_fence_await_dma_fence(fence,
+                                                               shared[i],
+                                                               timeout,
+                                                               gfp);
+                       if (pending < 0) {
+                               ret = pending;
+                               break;
+                       }
+
+                       ret |= pending;
+               }
+
+               for (i = 0; i < count; i++)
+                       fence_put(shared[i]);
+               kfree(shared);
+       } else {
+               excl = reservation_object_get_excl_rcu(resv);
+       }
+
+       if (ret >= 0 && excl && excl->ops != exclude) {
+               pending = i915_sw_fence_await_dma_fence(fence,
+                                                       excl,
+                                                       timeout,
+                                                       gfp);
+               if (pending < 0)
+                       ret = pending;
+               else
+                       ret |= pending;
+       }
+
+       fence_put(excl);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
new file mode 100644
index 0000000..3731416
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * i915_sw_fence.h - library routines for N:M synchronisation points
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _I915_SW_FENCE_H_
+#define _I915_SW_FENCE_H_
+
+#include <linux/gfp.h>
+#include <linux/kref.h>
+#include <linux/notifier.h> /* for NOTIFY_DONE */
+#include <linux/wait.h>
+
+struct completion;
+struct fence;
+struct fence_ops;
+struct reservation_object;
+
+struct i915_sw_fence {
+       wait_queue_head_t wait;
+       unsigned long flags;
+       struct kref kref;
+       atomic_t pending;
+};
+
+#define I915_SW_FENCE_CHECKED_BIT      0 /* used internally for DAG checking */
+#define I915_SW_FENCE_PRIVATE_BIT      1 /* available for use by owner */
+#define I915_SW_FENCE_MASK             (~3)
+
+enum i915_sw_fence_notify {
+       FENCE_COMPLETE,
+       FENCE_FREE
+};
+
+typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
+                                     enum i915_sw_fence_notify state);
+#define __i915_sw_fence_call __aligned(4)
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void i915_sw_fence_commit(struct i915_sw_fence *fence);
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+                                struct i915_sw_fence *after,
+                                wait_queue_t *wq);
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+                                 struct fence *dma,
+                                 unsigned long timeout,
+                                 gfp_t gfp);
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+                                   struct reservation_object *resv,
+                                   const struct fence_ops *exclude,
+                                   bool write,
+                                   unsigned long timeout,
+                                   gfp_t gfp);
+
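+/* A fence counts as done once pending has dropped below zero (0 -> -1
+ * on completion).
+ */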
+static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
+{
+       return atomic_read(&fence->pending) < 0;
+}
+
+#endif /* _I915_SW_FENCE_H_ */
index f1ffde7..1012eee 100644
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-#define dev_to_drm_minor(d) dev_get_drvdata((d))
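+/* sysfs drvdata holds the drm_minor; chase it back to the i915 device. */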
+static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
+{
+       struct drm_minor *minor = dev_get_drvdata(kdev);
+       return to_i915(minor->dev);
+}
 
 #ifdef CONFIG_PM
-static u32 calc_residency(struct drm_device *dev,
+static u32 calc_residency(struct drm_i915_private *dev_priv,
                          i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;
@@ -49,13 +52,13 @@ static u32 calc_residency(struct drm_device *dev,
        intel_runtime_pm_get(dev_priv);
 
        /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                units = 1;
                div = dev_priv->czclk_freq;
 
                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
-       } else if (IS_BROXTON(dev)) {
+       } else if (IS_BROXTON(dev_priv)) {
                units = 1;
                div = 1200;             /* 833.33ns */
        }
@@ -76,32 +79,32 @@ show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_get_drvdata(kdev);
-       u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_to_drm_minor(kdev);
-       u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_to_drm_minor(kdev);
-       u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
 static ssize_t
 show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_get_drvdata(kdev);
-       u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
@@ -144,9 +147,9 @@ static struct attribute_group media_rc6_attr_group = {
 };
 #endif
 
-static int l3_access_valid(struct drm_device *dev, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
 {
-       if (!HAS_L3_DPF(dev))
+       if (!HAS_L3_DPF(dev_priv))
                return -EPERM;
 
        if (offset % 4 != 0)
@@ -163,22 +166,21 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
 {
-       struct device *dev = kobj_to_dev(kobj);
-       struct drm_minor *dminor = dev_to_drm_minor(dev);
-       struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = to_i915(drm_dev);
+       struct device *kdev = kobj_to_dev(kobj);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct drm_device *dev = &dev_priv->drm;
        int slice = (int)(uintptr_t)attr->private;
        int ret;
 
        count = round_down(count, 4);
 
-       ret = l3_access_valid(drm_dev, offset);
+       ret = l3_access_valid(dev_priv, offset);
        if (ret)
                return ret;
 
        count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
 
-       ret = i915_mutex_lock_interruptible(drm_dev);
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
@@ -189,7 +191,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
        else
                memset(buf, 0, count);
 
-       mutex_unlock(&drm_dev->struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        return count;
 }
@@ -199,30 +201,29 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
 {
-       struct device *dev = kobj_to_dev(kobj);
-       struct drm_minor *dminor = dev_to_drm_minor(dev);
-       struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = to_i915(drm_dev);
+       struct device *kdev = kobj_to_dev(kobj);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_gem_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
        int ret;
 
-       if (!HAS_HW_CONTEXTS(drm_dev))
+       if (!HAS_HW_CONTEXTS(dev_priv))
                return -ENXIO;
 
-       ret = l3_access_valid(drm_dev, offset);
+       ret = l3_access_valid(dev_priv, offset);
        if (ret)
                return ret;
 
-       ret = i915_mutex_lock_interruptible(drm_dev);
+       ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
        if (!dev_priv->l3_parity.remap_info[slice]) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
-                       mutex_unlock(&drm_dev->struct_mutex);
+                       mutex_unlock(&dev->struct_mutex);
                        return -ENOMEM;
                }
        }
@@ -240,7 +241,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        list_for_each_entry(ctx, &dev_priv->context_list, link)
                ctx->remap_slice |= (1<<slice);
 
-       mutex_unlock(&drm_dev->struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
 
        return count;
 }
@@ -266,9 +267,7 @@ static struct bin_attribute dpf_attrs_1 = {
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        int ret;
 
        intel_runtime_pm_get(dev_priv);
@@ -298,9 +297,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
@@ -309,8 +306,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_i915_private *dev_priv = to_i915(minor->dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
@@ -321,9 +317,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        u32 val;
        ssize_t ret;
 
@@ -346,9 +340,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
@@ -357,9 +349,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
@@ -370,9 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        u32 val;
        ssize_t ret;
 
@@ -418,9 +406,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        intel_gpu_freq(dev_priv,
@@ -431,9 +417,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        u32 val;
        ssize_t ret;
 
@@ -490,9 +474,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
        u32 val;
 
        if (attr == &dev_attr_gt_RP0_freq_mhz)
@@ -538,8 +520,8 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
        struct device *kdev = kobj_to_dev(kobj);
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
        ssize_t ret_count = 0;
@@ -573,18 +555,10 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 loff_t off, size_t count)
 {
        struct device *kdev = kobj_to_dev(kobj);
-       struct drm_minor *minor = dev_to_drm_minor(kdev);
-       struct drm_device *dev = minor->dev;
-       int ret;
+       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
        DRM_DEBUG_DRIVER("Resetting error state\n");
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       i915_destroy_error_state(dev);
-       mutex_unlock(&dev->struct_mutex);
+       i915_destroy_error_state(&dev_priv->drm);
 
        return count;
 }
@@ -597,37 +571,38 @@ static struct bin_attribute error_state_attr = {
        .write = error_state_write,
 };
 
-void i915_setup_sysfs(struct drm_device *dev)
+void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 {
+       struct device *kdev = dev_priv->drm.primary->kdev;
        int ret;
 
 #ifdef CONFIG_PM
-       if (HAS_RC6(dev)) {
-               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+       if (HAS_RC6(dev_priv)) {
+               ret = sysfs_merge_group(&kdev->kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
-       if (HAS_RC6p(dev)) {
-               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+       if (HAS_RC6p(dev_priv)) {
+               ret = sysfs_merge_group(&kdev->kobj,
                                        &rc6p_attr_group);
                if (ret)
                        DRM_ERROR("RC6p residency sysfs setup failed\n");
        }
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               ret = sysfs_merge_group(&kdev->kobj,
                                        &media_rc6_attr_group);
                if (ret)
                        DRM_ERROR("Media RC6 residency sysfs setup failed\n");
        }
 #endif
-       if (HAS_L3_DPF(dev)) {
-               ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
+       if (HAS_L3_DPF(dev_priv)) {
+               ret = device_create_bin_file(kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");
 
-               if (NUM_L3_SLICES(dev) > 1) {
-                       ret = device_create_bin_file(dev->primary->kdev,
+               if (NUM_L3_SLICES(dev_priv) > 1) {
+                       ret = device_create_bin_file(kdev,
                                                     &dpf_attrs_1);
                        if (ret)
                                DRM_ERROR("l3 parity slice 1 setup failed\n");
@@ -635,30 +610,32 @@ void i915_setup_sysfs(struct drm_device *dev)
        }
 
        ret = 0;
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
-       else if (INTEL_INFO(dev)->gen >= 6)
-               ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
+       else if (INTEL_GEN(dev_priv) >= 6)
+               ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");
 
-       ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
+       ret = sysfs_create_bin_file(&kdev->kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
 }
 
-void i915_teardown_sysfs(struct drm_device *dev)
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 {
-       sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
+       struct device *kdev = dev_priv->drm.primary->kdev;
+
+       sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               sysfs_remove_files(&kdev->kobj, vlv_attrs);
        else
-               sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
-       device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
-       device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
+               sysfs_remove_files(&kdev->kobj, gen6_attrs);
+       device_remove_bin_file(kdev,  &dpf_attrs_1);
+       device_remove_bin_file(kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-       sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
-       sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
+       sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
+       sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
 #endif
 }
index ca2e912..dae340c 100644
@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 
        BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-       if (!IS_HASWELL(dev_priv))
-               return;
-
        magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
        if (magic != VGT_MAGIC)
                return;
index 85389cd..6c70a5b 100644
@@ -359,9 +359,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       struct intel_digital_port *intel_dig_port =
-               enc_to_dig_port(&encoder->base);
-       enum port port = intel_dig_port->port;
+       enum port port = enc_to_dig_port(&encoder->base)->port;
        enum pipe pipe = intel_crtc->pipe;
        uint32_t tmp, eldv;
        i915_reg_t aud_config, aud_cntrl_st2;
@@ -407,13 +405,10 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       struct intel_digital_port *intel_dig_port =
-               enc_to_dig_port(&encoder->base);
-       enum port port = intel_dig_port->port;
+       enum port port = enc_to_dig_port(&encoder->base)->port;
        enum pipe pipe = intel_crtc->pipe;
        uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t tmp;
+       uint32_t tmp, eldv;
        int len, i;
        i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
@@ -581,26 +576,26 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
        }
 }
 
-static void i915_audio_component_get_power(struct device *dev)
+static void i915_audio_component_get_power(struct device *kdev)
 {
-       intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+       intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_put_power(struct device *dev)
+static void i915_audio_component_put_power(struct device *kdev)
 {
-       intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+       intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_codec_wake_override(struct device *dev,
+static void i915_audio_component_codec_wake_override(struct device *kdev,
                                                     bool enable)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        u32 tmp;
 
        if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
                return;
 
-       i915_audio_component_get_power(dev);
+       i915_audio_component_get_power(kdev);
 
        /*
         * Enable/disable generating the codec wake signal, overriding the
@@ -618,13 +613,13 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
                usleep_range(1000, 1500);
        }
 
-       i915_audio_component_put_power(dev);
+       i915_audio_component_put_power(kdev);
 }
 
 /* Get CDCLK in kHz  */
-static int i915_audio_component_get_cdclk_freq(struct device *dev)
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 
        if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
                return -ENODEV;
@@ -632,10 +627,10 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
        return dev_priv->cdclk_freq;
 }
 
-static int i915_audio_component_sync_audio_rate(struct device *dev,
+static int i915_audio_component_sync_audio_rate(struct device *kdev,
                                                int port, int rate)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_encoder *intel_encoder;
        struct intel_crtc *crtc;
        struct drm_display_mode *mode;
@@ -652,7 +647,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
            !IS_HASWELL(dev_priv))
                return 0;
 
-       i915_audio_component_get_power(dev);
+       i915_audio_component_get_power(kdev);
        mutex_lock(&dev_priv->av_mutex);
        /* 1. get the pipe */
        intel_encoder = dev_priv->dig_port_map[port];
@@ -703,15 +698,15 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
  unlock:
        mutex_unlock(&dev_priv->av_mutex);
-       i915_audio_component_put_power(dev);
+       i915_audio_component_put_power(kdev);
        return err;
 }
 
-static int i915_audio_component_get_eld(struct device *dev, int port,
+static int i915_audio_component_get_eld(struct device *kdev, int port,
                                        bool *enabled,
                                        unsigned char *buf, int max_bytes)
 {
-       struct drm_i915_private *dev_priv = dev_to_i915(dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
        const u8 *eld;
@@ -745,11 +740,11 @@ static const struct i915_audio_component_ops i915_audio_component_ops = {
        .get_eld        = i915_audio_component_get_eld,
 };
 
-static int i915_audio_component_bind(struct device *i915_dev,
-                                    struct device *hda_dev, void *data)
+static int i915_audio_component_bind(struct device *i915_kdev,
+                                    struct device *hda_kdev, void *data)
 {
        struct i915_audio_component *acomp = data;
-       struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
        int i;
 
        if (WARN_ON(acomp->ops || acomp->dev))
@@ -757,7 +752,7 @@ static int i915_audio_component_bind(struct device *i915_dev,
 
        drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = &i915_audio_component_ops;
-       acomp->dev = i915_dev;
+       acomp->dev = i915_kdev;
        BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
        for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
                acomp->aud_sample_rate[i] = 0;
@@ -767,11 +762,11 @@ static int i915_audio_component_bind(struct device *i915_dev,
        return 0;
 }
 
-static void i915_audio_component_unbind(struct device *i915_dev,
-                                       struct device *hda_dev, void *data)
+static void i915_audio_component_unbind(struct device *i915_kdev,
+                                       struct device *hda_kdev, void *data)
 {
        struct i915_audio_component *acomp = data;
-       struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+       struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 
        drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = NULL;
index 2491e4c..9bad14d 100644
@@ -462,7 +462,10 @@ static int intel_breadcrumbs_signaler(void *arg)
                         */
                        intel_engine_remove_wait(engine,
                                                 &request->signaling.wait);
+
+                       local_bh_disable();
                        fence_signal(&request->fence);
+                       local_bh_enable(); /* kick start the tasklets */
 
                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
index bc0fef3..95a7277 100644
@@ -100,13 +100,14 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int i, pipe = intel_crtc->pipe;
        uint16_t coeffs[9] = { 0, };
+       struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
 
        if (crtc_state->ctm) {
                struct drm_color_ctm *ctm =
                        (struct drm_color_ctm *)crtc_state->ctm->data;
                uint64_t input[9] = { 0, };
 
-               if (intel_crtc->config->limited_color_range) {
+               if (intel_crtc_state->limited_color_range) {
                        ctm_mult_by_limited(input, ctm->matrix);
                } else {
                        for (i = 0; i < ARRAY_SIZE(input); i++)
@@ -158,7 +159,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
                 * into consideration.
                 */
                for (i = 0; i < 3; i++) {
-                       if (intel_crtc->config->limited_color_range)
+                       if (intel_crtc_state->limited_color_range)
                                coeffs[i * 3 + i] =
                                        I9XX_CSC_COEFF_LIMITED_RANGE;
                        else
@@ -182,7 +183,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        if (INTEL_INFO(dev)->gen > 6) {
                uint16_t postoff = 0;
 
-               if (intel_crtc->config->limited_color_range)
+               if (intel_crtc_state->limited_color_range)
                        postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
                I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -193,7 +194,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
        } else {
                uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
-               if (intel_crtc->config->limited_color_range)
+               if (intel_crtc_state->limited_color_range)
                        mode |= CSC_BLACK_SCREEN_OFFSET;
 
                I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -263,7 +264,8 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
 static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-                                   struct drm_property_blob *blob)
+                                   struct drm_property_blob *blob,
+                                   struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -272,7 +274,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
        int i;
 
        if (HAS_GMCH_DISPLAY(dev)) {
-               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
+               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -305,7 +307,8 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 
 static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
 {
-       i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+       i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
+                               to_intel_crtc_state(crtc_state));
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
@@ -323,7 +326,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
         * Workaround : Do not read or write the pipe palette/gamma data while
         * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
         */
-       if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+       if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
            (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
                hsw_disable_ips(intel_crtc);
                reenable_ips = true;
@@ -436,7 +439,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
                /* Turn off degamma/gamma on CGM block. */
                I915_WRITE(CGM_PIPE_MODE(pipe),
                           (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-               i9xx_load_luts_internal(crtc, state->gamma_lut);
+               i9xx_load_luts_internal(crtc, state->gamma_lut,
+                                       to_intel_crtc_state(state));
                return;
        }
 
@@ -479,7 +483,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
         * Also program a linear LUT in the legacy block (behind the
         * CGM block).
         */
-       i9xx_load_luts_internal(crtc, NULL);
+       i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
 }
 
 void intel_color_load_luts(struct drm_crtc_state *crtc_state)
index 827b6ef..dfbcf16 100644
@@ -143,13 +143,15 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
-static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+static void intel_crt_set_dpms(struct intel_encoder *encoder,
+                              struct intel_crtc_state *crtc_state,
+                              int mode)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
        u32 adpa;
 
        if (INTEL_INFO(dev)->gen >= 5)
@@ -193,23 +195,45 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
        I915_WRITE(crt->adpa_reg, adpa);
 }
 
-static void intel_disable_crt(struct intel_encoder *encoder)
+static void intel_disable_crt(struct intel_encoder *encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
-       intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+       intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
 }
 
-static void pch_disable_crt(struct intel_encoder *encoder)
+static void pch_disable_crt(struct intel_encoder *encoder,
+                           struct intel_crtc_state *old_crtc_state,
+                           struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_crt(struct intel_encoder *encoder)
+static void pch_post_disable_crt(struct intel_encoder *encoder,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct drm_connector_state *old_conn_state)
 {
-       intel_disable_crt(encoder);
+       intel_disable_crt(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_crt(struct intel_encoder *encoder)
+static void hsw_post_disable_crt(struct intel_encoder *encoder,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct drm_connector_state *old_conn_state)
 {
-       intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+
+       lpt_disable_pch_transcoder(dev_priv);
+       lpt_disable_iclkip(dev_priv);
+
+       intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder,
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state)
+{
+       intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
 }
 
 static enum drm_mode_status
@@ -253,7 +277,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_crt_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
 
@@ -894,6 +919,7 @@ void intel_crt_init(struct drm_device *dev)
        if (HAS_DDI(dev)) {
                crt->base.get_config = hsw_crt_get_config;
                crt->base.get_hw_state = intel_ddi_get_hw_state;
+               crt->base.post_disable = hsw_post_disable_crt;
        } else {
                crt->base.get_config = intel_crt_get_config;
                crt->base.get_hw_state = intel_crt_get_hw_state;
index fb27d18..1ea0e1f 100644
  * low-power state and comes back to normal.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
index c2df4e4..15d47c8 100644
@@ -546,6 +546,27 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
        DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
 }
 
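+/*
+ * Map a shared DPLL to its PORT_CLK_SEL value; this replaces the
+ * ddi_pll_sel field previously cached in the crtc state.
+ */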
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+       switch (pll->id) {
+       case DPLL_ID_WRPLL1:
+               return PORT_CLK_SEL_WRPLL1;
+       case DPLL_ID_WRPLL2:
+               return PORT_CLK_SEL_WRPLL2;
+       case DPLL_ID_SPLL:
+               return PORT_CLK_SEL_SPLL;
+       case DPLL_ID_LCPLL_810:
+               return PORT_CLK_SEL_LCPLL_810;
+       case DPLL_ID_LCPLL_1350:
+               return PORT_CLK_SEL_LCPLL_1350;
+       case DPLL_ID_LCPLL_2700:
+               return PORT_CLK_SEL_LCPLL_2700;
+       default:
+               MISSING_CASE(pll->id);
+               return PORT_CLK_SEL_NONE;
+       }
+}
+
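With ddi_pll_sel dropped from the CRTC state, the PORT_CLK_SEL value is computed from the shared DPLL each time it is needed, so it can never go stale. A self-contained sketch of the derive-rather-than-cache pattern; the register encodings here are invented stand-ins, not the real PORT_CLK_SEL_* values:

#include <stdio.h>

enum pll_id { PLL_WRPLL1, PLL_WRPLL2, PLL_SPLL };

/* Invented encodings; the real PORT_CLK_SEL_* values are different. */
static unsigned int pll_to_clk_sel(enum pll_id id)
{
        switch (id) {
        case PLL_WRPLL1: return 0u << 29;
        case PLL_WRPLL2: return 1u << 29;
        case PLL_SPLL:   return 3u << 29;
        default:
                fprintf(stderr, "missing case %d\n", id); /* MISSING_CASE() stand-in */
                return ~0u;                               /* "none" sentinel */
        }
}

int main(void)
{
        /* Derive the register value at use time instead of caching it in
         * per-CRTC state that a modeset could leave stale. */
        printf("PORT_CLK_SEL = 0x%08x\n", pll_to_clk_sel(PLL_SPLL));
        return 0;
}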
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
  * both the DDI port and PCH receiver for the desired DDI buffer settings.
@@ -561,7 +582,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
-       u32 temp, i, rx_ctl_val;
+       u32 temp, i, rx_ctl_val, ddi_pll_sel;
 
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
@@ -592,8 +613,9 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
 
        /* Configure Port Clock Select */
-       I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
-       WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
+       ddi_pll_sel = hsw_pll_to_ddi_pll_sel(intel_crtc->config->shared_dpll);
+       I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+       WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
        /* Start the training iterating through available voltages and emphasis,
         * testing each value twice. */
@@ -870,7 +892,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
        int link_clock = 0;
        uint32_t dpll_ctl1, dpll;
 
-       dpll = pipe_config->ddi_pll_sel;
+       dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 
        dpll_ctl1 = I915_READ(DPLL_CTRL1);
 
@@ -918,7 +940,7 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
        int link_clock = 0;
        u32 val, pll;
 
-       val = pipe_config->ddi_pll_sel;
+       val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
        switch (val & PORT_CLK_SEL_MASK) {
        case PORT_CLK_SEL_LCPLL_810:
                link_clock = 81000;
@@ -1586,13 +1608,15 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 }
 
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-                         const struct intel_crtc_state *pipe_config)
+                         struct intel_shared_dpll *pll)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_ddi_get_encoder_port(encoder);
 
+       if (WARN_ON(!pll))
+               return;
+
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-               uint32_t dpll = pipe_config->ddi_pll_sel;
                uint32_t val;
 
                /* DDI -> PLL mapping  */
@@ -1600,70 +1624,91 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
 
                val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
                        DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
-               val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+               val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
                        DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
 
                I915_WRITE(DPLL_CTRL2, val);
 
        } else if (INTEL_INFO(dev_priv)->gen < 9) {
-               WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
-               I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
+               I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
        }
 }
 
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+                                   int link_rate, uint32_t lane_count,
+                                   struct intel_shared_dpll *pll,
+                                   bool link_mst)
 {
-       struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
-       enum port port = intel_ddi_get_encoder_port(intel_encoder);
-       int type = intel_encoder->type;
-
-       if (type == INTEL_OUTPUT_HDMI) {
-               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
-               intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-       }
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = intel_ddi_get_encoder_port(encoder);
 
-       if (type == INTEL_OUTPUT_EDP) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       intel_dp_set_link_params(intel_dp, link_rate, lane_count,
+                                link_mst);
+       if (encoder->type == INTEL_OUTPUT_EDP)
                intel_edp_panel_on(intel_dp);
-       }
-
-       intel_ddi_clk_select(intel_encoder, crtc->config);
 
-       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-               intel_prepare_dp_ddi_buffers(intel_encoder);
-
-               intel_dp_set_link_params(intel_dp, crtc->config);
+       intel_ddi_clk_select(encoder, pll);
+       intel_prepare_dp_ddi_buffers(encoder);
+       intel_ddi_init_dp_buf_reg(encoder);
+       intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+       intel_dp_start_link_train(intel_dp);
+       if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+               intel_dp_stop_link_train(intel_dp);
+}
 
-               intel_ddi_init_dp_buf_reg(intel_encoder);
+static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+                                     bool has_hdmi_sink,
+                                     struct drm_display_mode *adjusted_mode,
+                                     struct intel_shared_dpll *pll)
+{
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_encoder *drm_encoder = &encoder->base;
+       enum port port = intel_ddi_get_encoder_port(encoder);
+       int level = intel_ddi_hdmi_level(dev_priv, port);
 
-               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
-               intel_dp_start_link_train(intel_dp);
-               if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
-                       intel_dp_stop_link_train(intel_dp);
-       } else if (type == INTEL_OUTPUT_HDMI) {
-               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-               int level = intel_ddi_hdmi_level(dev_priv, port);
+       intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+       intel_ddi_clk_select(encoder, pll);
+       intel_prepare_hdmi_ddi_buffers(encoder);
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+               skl_ddi_set_iboost(encoder, level);
+       else if (IS_BROXTON(dev_priv))
+               bxt_ddi_vswing_sequence(dev_priv, level, port,
+                                       INTEL_OUTPUT_HDMI);
 
-               intel_prepare_hdmi_ddi_buffers(intel_encoder);
+       intel_hdmi->set_infoframes(drm_encoder,
+                                  has_hdmi_sink,
+                                  adjusted_mode);
+}
 
-               if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-                       skl_ddi_set_iboost(intel_encoder, level);
-               else if (IS_BROXTON(dev_priv))
-                       bxt_ddi_vswing_sequence(dev_priv, level, port,
-                                               INTEL_OUTPUT_HDMI);
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+       int type = intel_encoder->type;
 
-               intel_hdmi->set_infoframes(encoder,
-                                          crtc->config->has_hdmi_sink,
-                                          &crtc->config->base.adjusted_mode);
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
+               intel_ddi_pre_enable_dp(intel_encoder,
+                                       crtc->config->port_clock,
+                                       crtc->config->lane_count,
+                                       crtc->config->shared_dpll,
+                                       intel_crtc_has_type(crtc->config,
+                                                           INTEL_OUTPUT_DP_MST));
+       }
+       if (type == INTEL_OUTPUT_HDMI) {
+               intel_ddi_pre_enable_hdmi(intel_encoder,
+                                         crtc->config->has_hdmi_sink,
+                                         &crtc->config->base.adjusted_mode,
+                                         crtc->config->shared_dpll);
        }
 }
 
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
+                                  struct intel_crtc_state *old_crtc_state,
+                                  struct drm_connector_state *old_conn_state)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
@@ -1673,6 +1718,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
        uint32_t val;
        bool wait = false;
 
+       /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
+
        val = I915_READ(DDI_BUF_CTL(port));
        if (val & DDI_BUF_CTL_ENABLE) {
                val &= ~DDI_BUF_CTL_ENABLE;
@@ -1708,7 +1755,42 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
        }
 }
 
-static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+       uint32_t val;
+
+       /*
+        * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+        * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+        * step 13 is the correct place for it. Step 18 is where it was
+        * originally before the BUN.
+        */
+       val = I915_READ(FDI_RX_CTL(PIPE_A));
+       val &= ~FDI_RX_ENABLE;
+       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+       intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
+
+       val = I915_READ(FDI_RX_MISC(PIPE_A));
+       val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+       val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+       I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+
+       val = I915_READ(FDI_RX_CTL(PIPE_A));
+       val &= ~FDI_PCDCLK;
+       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+       val = I915_READ(FDI_RX_CTL(PIPE_A));
+       val &= ~FDI_RX_PLL_ENABLE;
+       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+}
+
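The relocated FDI teardown is a fixed sequence of read-modify-write steps, each clearing one enable bit while leaving the rest of the register intact. The same idiom over a fake register file (the bit positions below are invented, not the real FDI_RX_* layout):

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[1];                          /* stand-in MMIO space */

static uint32_t reg_read(int r)                   { return regs[r]; }
static void     reg_write(int r, uint32_t v)      { regs[r] = v; }

/* Classic read-modify-write: clear only the requested bits. */
static void reg_clear(int r, uint32_t mask)
{
        reg_write(r, reg_read(r) & ~mask);
}

/* Invented bit positions for illustration only. */
#define RX_ENABLE      (1u << 31)
#define PCDCLK         (1u << 4)
#define RX_PLL_ENABLE  (1u << 13)

int main(void)
{
        regs[0] = RX_ENABLE | PCDCLK | RX_PLL_ENABLE | 0xff;

        /* Tear down in the mandated order: receiver, then clock, then PLL. */
        reg_clear(0, RX_ENABLE);
        reg_clear(0, PCDCLK);
        reg_clear(0, RX_PLL_ENABLE);

        printf("final: 0x%08x\n", regs[0]);       /* unrelated 0xff bits survive */
        return 0;
}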
+static void intel_enable_ddi(struct intel_encoder *intel_encoder,
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
@@ -1737,7 +1819,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 
                intel_edp_backlight_on(intel_dp);
                intel_psr_enable(intel_dp);
-               intel_edp_drrs_enable(intel_dp);
+               intel_edp_drrs_enable(intel_dp, pipe_config);
        }
 
        if (intel_crtc->config->has_audio) {
@@ -1746,7 +1828,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        }
 }
 
-static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+static void intel_disable_ddi(struct intel_encoder *intel_encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
@@ -1763,7 +1847,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-               intel_edp_drrs_disable(intel_dp);
+               intel_edp_drrs_disable(intel_dp, old_crtc_state);
                intel_psr_disable(intel_dp);
                intel_edp_backlight_off(intel_dp);
        }
@@ -2052,7 +2136,9 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
        }
 }
 
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
+                                  struct intel_crtc_state *pipe_config,
+                                  struct drm_connector_state *conn_state)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2141,38 +2227,6 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
        udelay(600);
 }
 
-void intel_ddi_fdi_disable(struct drm_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-       uint32_t val;
-
-       /*
-        * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
-        * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
-        * step 13 is the correct place for it. Step 18 is where it was
-        * originally before the BUN.
-        */
-       val = I915_READ(FDI_RX_CTL(PIPE_A));
-       val &= ~FDI_RX_ENABLE;
-       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-       intel_ddi_post_disable(intel_encoder);
-
-       val = I915_READ(FDI_RX_MISC(PIPE_A));
-       val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
-       val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
-       I915_WRITE(FDI_RX_MISC(PIPE_A), val);
-
-       val = I915_READ(FDI_RX_CTL(PIPE_A));
-       val &= ~FDI_PCDCLK;
-       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-       val = I915_READ(FDI_RX_CTL(PIPE_A));
-       val &= ~FDI_RX_PLL_ENABLE;
-       I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
@@ -2272,7 +2326,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 }
 
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int type = encoder->type;
@@ -2285,9 +2340,9 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
        if (type == INTEL_OUTPUT_HDMI)
-               ret = intel_hdmi_compute_config(encoder, pipe_config);
+               ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
        else
-               ret = intel_dp_compute_config(encoder, pipe_config);
+               ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
 
        if (IS_BROXTON(dev_priv) && ret)
                pipe_config->lane_lat_optim_mask =
@@ -2338,6 +2393,45 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
        return connector;
 }
 
+struct intel_shared_dpll *
+intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
+{
+       struct intel_connector *connector = intel_dp->attached_connector;
+       struct intel_encoder *encoder = connector->encoder;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct intel_shared_dpll *pll = NULL;
+       struct intel_shared_dpll_config tmp_pll_config;
+       enum intel_dpll_id dpll_id;
+
+       if (IS_BROXTON(dev_priv)) {
+       dpll_id = (enum intel_dpll_id)dig_port->port;
+               /*
+                * Select the required PLL directly by port: on these
+                * platforms each port has its own PLL, nothing is shared.
+                */
+               pll = &dev_priv->shared_dplls[dpll_id];
+               if (WARN_ON(pll->active_mask)) {
+                       DRM_ERROR("Shared DPLL in use. active_mask:%x\n",
+                                 pll->active_mask);
+                       return NULL;
+               }
+               tmp_pll_config = pll->config;
+               if (!bxt_ddi_dp_set_dpll_hw_state(clock,
+                                                 &pll->config.hw_state)) {
+                       DRM_ERROR("Could not setup DPLL\n");
+                       pll->config = tmp_pll_config;
+                       return NULL;
+               }
+       } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               pll = skl_find_link_pll(dev_priv, clock);
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+               pll = hsw_ddi_dp_get_dpll(encoder, clock);
+       }
+       return pll;
+}
+
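For Broxton the new helper snapshots pll->config before computing the DP state, so a failed bxt_ddi_dp_set_dpll_hw_state() leaves the PLL exactly as it found it. A minimal sketch of that save/attempt/restore pattern with a toy config struct and a stand-in compute step:

#include <stdio.h>
#include <stdbool.h>

struct pll_config { int p, n, m; };

/* Pretend state computation; fails for clocks it cannot hit (assumption). */
static bool compute_dpll(int clock, struct pll_config *cfg)
{
        if (clock <= 0)
                return false;
        cfg->p = 2; cfg->n = 1; cfg->m = clock / 2;
        return true;
}

static bool setup_pll(struct pll_config *cfg, int clock)
{
        struct pll_config saved = *cfg;      /* snapshot before touching it */

        if (!compute_dpll(clock, cfg)) {
                *cfg = saved;                /* roll back on failure */
                return false;
        }
        return true;
}

int main(void)
{
        struct pll_config cfg = { 1, 1, 1 };

        if (!setup_pll(&cfg, -1))
                printf("setup failed, config restored: m=%d\n", cfg.m); /* m=1 */
        return 0;
}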
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
index cba137f..73b6858 100644
@@ -46,71 +46,70 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
        u32 fuse, eu_dis;
 
        fuse = I915_READ(CHV_FUSE_GT);
 
-       info->slice_total = 1;
+       sseu->slice_mask = BIT(0);
 
        if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-               info->subslice_per_slice++;
+               sseu->subslice_mask |= BIT(0);
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
                                 CHV_FGT_EU_DIS_SS0_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
+               sseu->eu_total += 8 - hweight32(eu_dis);
        }
 
        if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-               info->subslice_per_slice++;
+               sseu->subslice_mask |= BIT(1);
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
                                 CHV_FGT_EU_DIS_SS1_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
+               sseu->eu_total += 8 - hweight32(eu_dis);
        }
 
-       info->subslice_total = info->subslice_per_slice;
        /*
         * CHV is expected to always have a uniform distribution of EUs
         * across subslices.
        */
-       info->eu_per_subslice = info->subslice_total ?
-                               info->eu_total / info->subslice_total :
+       sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+                               sseu->eu_total / sseu_subslice_total(sseu) :
                                0;
        /*
         * CHV supports subslice power gating on devices with more than
         * one subslice, and supports EU power gating on devices with
         * more than one EU pair per subslice.
        */
-       info->has_slice_pg = 0;
-       info->has_subslice_pg = (info->subslice_total > 1);
-       info->has_eu_pg = (info->eu_per_subslice > 2);
+       sseu->has_slice_pg = 0;
+       sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+       sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
 }
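Instead of carrying separate slice/subslice counters that have to be kept in sync, the device info now stores availability bitmasks and derives every total on demand with a population count (hweight). The same accounting in a standalone sketch, with __builtin_popcount standing in for hweight8() and the total computed, as in the old code, as slices times subslices per slice:

#include <stdio.h>
#include <stdint.h>

struct sseu_info {
        uint8_t slice_mask;
        uint8_t subslice_mask;   /* per-slice, as on the gen8/gen9 fuses */
};

static unsigned int subslice_total(const struct sseu_info *s)
{
        /* subslices per slice times enabled slices, both from the masks */
        return __builtin_popcount(s->slice_mask) *
               __builtin_popcount(s->subslice_mask);
}

int main(void)
{
        struct sseu_info s = { .slice_mask = 0x3, .subslice_mask = 0x5 };

        printf("slices=%d subslices/slice=%d total=%u\n",
               __builtin_popcount(s.slice_mask),
               __builtin_popcount(s.subslice_mask),
               subslice_total(&s));          /* 2 * 2 = 4 */
        return 0;
}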
 
 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 {
        struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct sseu_dev_info *sseu = &info->sseu;
        int s_max = 3, ss_max = 4, eu_max = 8;
        int s, ss;
-       u32 fuse2, s_enable, ss_disable, eu_disable;
+       u32 fuse2, eu_disable;
        u8 eu_mask = 0xff;
 
        fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
 
-       info->slice_total = hweight32(s_enable);
        /*
         * The subslice disable field is global, i.e. it applies
         * to each of the enabled slices.
        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total * info->subslice_per_slice;
+       sseu->subslice_mask = (1 << ss_max) - 1;
+       sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+                                GEN9_F2_SS_DIS_SHIFT);
 
        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
        */
        for (s = 0; s < s_max; s++) {
-               if (!(s_enable & BIT(s)))
+               if (!(sseu->slice_mask & BIT(s)))
                        /* skip disabled slice */
                        continue;
 
@@ -118,7 +117,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
                for (ss = 0; ss < ss_max; ss++) {
                        int eu_per_ss;
 
-                       if (ss_disable & BIT(ss))
+                       if (!(sseu->subslice_mask & BIT(ss)))
                                /* skip disabled subslice */
                                continue;
 
@@ -131,9 +130,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
                         * subslices if they are unbalanced.
                         */
                        if (eu_per_ss == 7)
-                               info->subslice_7eu[s] |= BIT(ss);
+                               sseu->subslice_7eu[s] |= BIT(ss);
 
-                       info->eu_total += eu_per_ss;
+                       sseu->eu_total += eu_per_ss;
                }
        }
 
@@ -144,9 +143,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
         * recovery. BXT is expected to be perfectly uniform in EU
         * distribution.
        */
-       info->eu_per_subslice = info->subslice_total ?
-                               DIV_ROUND_UP(info->eu_total,
-                                            info->subslice_total) : 0;
+       sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+                               DIV_ROUND_UP(sseu->eu_total,
+                                            sseu_subslice_total(sseu)) : 0;
        /*
         * SKL supports slice power gating on devices with more than
         * one slice, and supports EU power gating on devices with
@@ -155,15 +154,15 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
         * supports EU power gating on devices with more than one EU
         * pair per subslice.
        */
-       info->has_slice_pg =
+       sseu->has_slice_pg =
                (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-               info->slice_total > 1;
-       info->has_subslice_pg =
-               IS_BROXTON(dev_priv) && info->subslice_total > 1;
-       info->has_eu_pg = info->eu_per_subslice > 2;
+               hweight8(sseu->slice_mask) > 1;
+       sseu->has_subslice_pg =
+               IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+       sseu->has_eu_pg = sseu->eu_per_subslice > 2;
 
        if (IS_BROXTON(dev_priv)) {
-#define IS_SS_DISABLED(_ss_disable, ss)    (_ss_disable & BIT(ss))
+#define IS_SS_DISABLED(ss)     (!(sseu->subslice_mask & BIT(ss)))
                /*
                 * There is a HW issue in 2x6 fused down parts that requires
                 * Pooled EU to be enabled as a WA. The pool configuration
@@ -171,19 +170,18 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
                 * doesn't affect if the device has all 3 subslices enabled.
                 */
                /* WaEnablePooledEuFor2x6:bxt */
-               info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
-                                      (info->subslice_per_slice == 2 &&
+               info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
+                                      (hweight8(sseu->subslice_mask) == 2 &&
                                        INTEL_REVID(dev_priv) < BXT_REVID_C0));
 
-               info->min_eu_in_pool = 0;
+               sseu->min_eu_in_pool = 0;
                if (info->has_pooled_eu) {
-                       if (IS_SS_DISABLED(ss_disable, 0) ||
-                           IS_SS_DISABLED(ss_disable, 2))
-                               info->min_eu_in_pool = 3;
-                       else if (IS_SS_DISABLED(ss_disable, 1))
-                               info->min_eu_in_pool = 6;
+                       if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+                               sseu->min_eu_in_pool = 3;
+                       else if (IS_SS_DISABLED(1))
+                               sseu->min_eu_in_pool = 6;
                        else
-                               info->min_eu_in_pool = 9;
+                               sseu->min_eu_in_pool = 9;
                }
 #undef IS_SS_DISABLED
        }
@@ -191,14 +189,20 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
        const int s_max = 3, ss_max = 3, eu_max = 8;
        int s, ss;
-       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+       u32 fuse2, eu_disable[s_max];
 
        fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+       sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       sseu->subslice_mask = BIT(ss_max) - 1;
+       sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+                                GEN8_F2_SS_DIS_SHIFT);
 
        eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
        eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
@@ -208,28 +212,19 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
                        ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
                         (32 - GEN8_EU_DIS1_S2_SHIFT));
 
-       info->slice_total = hweight32(s_enable);
-
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total * info->subslice_per_slice;
-
        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
         */
        for (s = 0; s < s_max; s++) {
-               if (!(s_enable & (0x1 << s)))
+               if (!(sseu->slice_mask & BIT(s)))
                        /* skip disabled slice */
                        continue;
 
                for (ss = 0; ss < ss_max; ss++) {
                        u32 n_disabled;
 
-                       if (ss_disable & (0x1 << ss))
+                       if (!(sseu->subslice_mask & BIT(ss)))
                                /* skip disabled subslice */
                                continue;
 
@@ -239,9 +234,9 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
                         * Record which subslices have 7 EUs.
                         */
                        if (eu_max - n_disabled == 7)
-                               info->subslice_7eu[s] |= 1 << ss;
+                               sseu->subslice_7eu[s] |= 1 << ss;
 
-                       info->eu_total += eu_max - n_disabled;
+                       sseu->eu_total += eu_max - n_disabled;
                }
        }
 
@@ -250,16 +245,17 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
         * subslices with the exception that any one EU in any one subslice may
         * be fused off for die recovery.
         */
-       info->eu_per_subslice = info->subslice_total ?
-               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+       sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+                               DIV_ROUND_UP(sseu->eu_total,
+                                            sseu_subslice_total(sseu)) : 0;
 
        /*
         * BDW supports slice power gating on devices with more than
         * one slice.
         */
-       info->has_slice_pg = (info->slice_total > 1);
-       info->has_subslice_pg = 0;
-       info->has_eu_pg = 0;
+       sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+       sseu->has_subslice_pg = 0;
+       sseu->has_eu_pg = 0;
 }
 
 /*
@@ -374,15 +370,19 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                info->has_snoop = false;
 
-       DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
-       DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
-       DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
-       DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
-       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+       DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
+       DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
+       DRM_DEBUG_DRIVER("subslice total: %u\n",
+                        sseu_subslice_total(&info->sseu));
+       DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
+       DRM_DEBUG_DRIVER("subslice per slice: %u\n",
+                        hweight8(info->sseu.subslice_mask));
+       DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
+       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
        DRM_DEBUG_DRIVER("has slice power gating: %s\n",
-                        info->has_slice_pg ? "y" : "n");
+                        info->sseu.has_slice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
-                        info->has_subslice_pg ? "y" : "n");
+                        info->sseu.has_subslice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has EU power gating: %s\n",
-                        info->has_eu_pg ? "y" : "n");
+                        info->sseu.has_eu_pg ? "y" : "n");
 }
index d224f64..497d99b 100644
@@ -1907,7 +1907,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
        }
 }
 
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 {
        u32 val;
 
@@ -3377,6 +3377,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
+       const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
        int pipe = intel_crtc->pipe;
        u32 plane_ctl;
        unsigned int rotation = plane_state->base.rotation;
@@ -3410,6 +3411,9 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        intel_crtc->adjusted_x = src_x;
        intel_crtc->adjusted_y = src_y;
 
+       if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
+               skl_write_plane_wm(intel_crtc, wm, 0);
+
        I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
        I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
        I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
@@ -3441,7 +3445,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       int pipe = to_intel_crtc(crtc)->pipe;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+
+       /*
+        * We only populate skl_results on watermark updates, and if the
+        * plane's visibility isn't actually changing, neither are its watermarks.
+        */
+       if (!crtc->primary->state->visible)
+               skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
 
        I915_WRITE(PLANE_CTL(pipe, 0), 0);
        I915_WRITE(PLANE_SURF(pipe, 0), 0);
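skl_write_plane_wm() is now called from the plane update itself, but only when the global dirty_pipes bitmask says this pipe's watermark results were actually recomputed. A toy version of that dirty-mask gate:

#include <stdio.h>
#include <stdint.h>

#define PIPE_MASK(p)  (1u << (p))

struct wm_values {
        uint32_t dirty_pipes;     /* bit per pipe with recomputed watermarks */
};

static void write_plane_wm(int pipe)
{
        printf("flushing watermarks for pipe %d\n", pipe);
}

static void update_plane(int pipe, struct wm_values *wm)
{
        /* Only touch the WM registers when this pipe's results are fresh. */
        if (wm->dirty_pipes & PIPE_MASK(pipe))
                write_plane_wm(pipe);
        /* ... program the plane control/surface registers here ... */
}

int main(void)
{
        struct wm_values wm = { .dirty_pipes = PIPE_MASK(0) | PIPE_MASK(2) };

        update_plane(0, &wm);     /* flushes */
        update_plane(1, &wm);     /* skipped */
        return 0;
}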
@@ -3634,15 +3646,26 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev->mode_config.mutex);
 }
 
+static bool abort_flip_on_reset(struct intel_crtc *crtc)
+{
+       struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
+
+       if (i915_reset_in_progress(error))
+               return true;
+
+       if (crtc->reset_count != i915_reset_count(error))
+               return true;
+
+       return false;
+}
+
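A flip is now abandoned either while a reset is actually in progress or when the CRTC's sampled reset generation no longer matches the global count, meaning a reset completed after the flip was queued. The generation-counter idiom in isolation:

#include <stdio.h>
#include <stdbool.h>

struct gpu_error {
        unsigned long reset_count;   /* bumped once per completed reset */
        bool reset_in_progress;
};

struct crtc {
        unsigned long reset_count;   /* sampled when the flip was queued */
};

static bool abort_flip(const struct crtc *c, const struct gpu_error *e)
{
        if (e->reset_in_progress)
                return true;
        /* A mismatch means a reset completed since the flip was queued,
         * so the hardware state backing it is gone. */
        return c->reset_count != e->reset_count;
}

int main(void)
{
        struct gpu_error err = { .reset_count = 3, .reset_in_progress = false };
        struct crtc c = { .reset_count = 2 };

        printf("abort=%d\n", abort_flip(&c, &err));   /* 1: stale generation */
        return 0;
}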
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       unsigned reset_counter;
        bool pending;
 
-       reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
-       if (intel_crtc->reset_counter != reset_counter)
+       if (abort_flip_on_reset(intel_crtc))
                return false;
 
        spin_lock_irq(&dev->event_lock);
@@ -4285,7 +4308,7 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
        return 0;
 }
 
-static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 {
        u32 temp;
 
@@ -5154,15 +5177,137 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
        intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+                                         struct intel_crtc_state *crtc_state,
+                                         struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct drm_connector_state *conn_state = conn->state;
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->pre_pll_enable)
+                       encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+       }
+}
+
+static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+                                     struct intel_crtc_state *crtc_state,
+                                     struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct drm_connector_state *conn_state = conn->state;
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->pre_enable)
+                       encoder->pre_enable(encoder, crtc_state, conn_state);
+       }
+}
+
+static void intel_encoders_enable(struct drm_crtc *crtc,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct drm_atomic_state *old_state)
 {
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct drm_connector_state *conn_state = conn->state;
+               struct intel_encoder *encoder =
+                       to_intel_encoder(conn_state->best_encoder);
+
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               encoder->enable(encoder, crtc_state, conn_state);
+               intel_opregion_notify_encoder(encoder, true);
+       }
+}
+
+static void intel_encoders_disable(struct drm_crtc *crtc,
+                                  struct intel_crtc_state *old_crtc_state,
+                                  struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               intel_opregion_notify_encoder(encoder, false);
+               encoder->disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
+static void intel_encoders_post_disable(struct drm_crtc *crtc,
+                                       struct intel_crtc_state *old_crtc_state,
+                                       struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->post_disable)
+                       encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
+static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+                                           struct intel_crtc_state *old_crtc_state,
+                                           struct drm_atomic_state *old_state)
+{
+       struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != crtc)
+                       continue;
+
+               if (encoder->post_pll_disable)
+                       encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+       }
+}
+
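Each new intel_encoders_*() helper replaces an open-coded for_each_encoder_on_crtc() loop: it walks the connectors recorded in the atomic state, skips the ones aimed at other CRTCs, and invokes a single optional encoder hook with the matching CRTC and connector state. A stripped-down user-space sketch of that dispatch shape (toy types, not the DRM ones):

#include <stdio.h>

struct encoder {
        const char *name;
        /* Optional stage hook: NULL means the encoder skips this stage. */
        void (*pre_enable)(struct encoder *enc);
};

struct connector {
        int crtc_id;                  /* CRTC this connector is wired to */
        struct encoder *encoder;
};

/* Walk every connector in the state and fire the hook only for
 * connectors that target the CRTC being enabled. */
static void encoders_pre_enable(struct connector *conns, int n, int crtc_id)
{
        for (int i = 0; i < n; i++) {
                if (conns[i].crtc_id != crtc_id)
                        continue;
                if (conns[i].encoder->pre_enable)
                        conns[i].encoder->pre_enable(conns[i].encoder);
        }
}

static void ddi_pre_enable(struct encoder *enc)
{
        printf("%s: pre_enable\n", enc->name);
}

int main(void)
{
        struct encoder ddi = { "DDI-A", ddi_pre_enable };
        struct encoder crt = { "CRT", NULL };       /* no pre_enable stage */
        struct connector conns[] = { { 0, &ddi }, { 1, &crt } };

        encoders_pre_enable(conns, 2, 0);           /* only DDI-A fires */
        return 0;
}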
+static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
+                                struct drm_atomic_state *old_state)
+{
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -5200,9 +5345,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc->active = true;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
@@ -5232,8 +5375,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->enable(encoder);
+       intel_encoders_enable(crtc, pipe_config, old_state);
 
        if (HAS_PCH_CPT(dev))
                cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -5251,16 +5393,15 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
        return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-static void haswell_crtc_enable(struct drm_crtc *crtc)
+static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
+                               struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -5269,9 +5410,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
+       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->shared_dpll)
                intel_enable_shared_dpll(intel_crtc);
@@ -5309,10 +5448,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        else
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
-       }
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->has_pch_encoder)
                dev_priv->display.fdi_link_train(crtc);
@@ -5353,10 +5489,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               encoder->enable(encoder);
-               intel_opregion_notify_encoder(encoder, true);
-       }
+       intel_encoders_enable(crtc, pipe_config, old_state);
 
        if (intel_crtc->config->has_pch_encoder) {
                intel_wait_for_vblank(dev, pipe);
@@ -5390,12 +5523,13 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
        }
 }
 
-static void ironlake_crtc_disable(struct drm_crtc *crtc)
+static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                                 struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
        /*
@@ -5408,8 +5542,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
 
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);
@@ -5421,9 +5554,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        if (intel_crtc->config->has_pch_encoder)
                ironlake_fdi_disable(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder);
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
        if (intel_crtc->config->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -5453,22 +5584,20 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
-static void haswell_crtc_disable(struct drm_crtc *crtc)
+static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                                struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
        if (intel_crtc->config->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               intel_opregion_notify_encoder(encoder, false);
-               encoder->disable(encoder);
-       }
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
 
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);
@@ -5491,18 +5620,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_pipe_clock(intel_crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder);
-
-       if (intel_crtc->config->has_pch_encoder) {
-               lpt_disable_pch_transcoder(dev_priv);
-               lpt_disable_iclkip(dev_priv);
-               intel_ddi_fdi_disable(crtc);
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
+       if (old_crtc_state->has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      true);
-       }
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -6558,14 +6680,13 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
        intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
-static void valleyview_crtc_enable(struct drm_crtc *crtc)
+static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
+                                  struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
        int pipe = intel_crtc->pipe;
 
        if (WARN_ON(intel_crtc->active))
@@ -6590,9 +6711,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
+       intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
        if (IS_CHERRYVIEW(dev)) {
                chv_prepare_pll(intel_crtc, intel_crtc->config);
@@ -6602,9 +6721,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                vlv_enable_pll(intel_crtc, intel_crtc->config);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        i9xx_pfit_enable(intel_crtc);
 
@@ -6616,8 +6733,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->enable(encoder);
+       intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -6629,14 +6745,13 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
        I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
 }
 
-static void i9xx_crtc_enable(struct drm_crtc *crtc)
+static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
+                            struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
-       struct intel_crtc_state *pipe_config =
-               to_intel_crtc_state(crtc->state);
        enum pipe pipe = intel_crtc->pipe;
 
        if (WARN_ON(intel_crtc->active))
@@ -6657,9 +6772,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        if (!IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
        i9xx_enable_pll(intel_crtc);
 
@@ -6673,8 +6786,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->enable(encoder);
+       intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6692,12 +6804,13 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
        I915_WRITE(PFIT_CONTROL, 0);
 }
 
-static void i9xx_crtc_disable(struct drm_crtc *crtc)
+static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
+                             struct drm_atomic_state *old_state)
 {
+       struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
 
        /*
@@ -6707,8 +6820,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        if (IS_GEN2(dev))
                intel_wait_for_vblank(dev, pipe);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
+       intel_encoders_disable(crtc, old_crtc_state, old_state);
 
        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);
@@ -6717,9 +6829,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
        i9xx_pfit_disable(intel_crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder);
+       intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
        if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev))
@@ -6730,9 +6840,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                        i9xx_disable_pll(intel_crtc);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_pll_disable)
-                       encoder->post_pll_disable(encoder);
+       intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 
        if (!IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6745,6 +6853,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum intel_display_power_domain domain;
        unsigned long domains;
+       struct drm_atomic_state *state;
+       struct intel_crtc_state *crtc_state;
+       int ret;
 
        if (!intel_crtc->active)
                return;
@@ -6758,7 +6869,18 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
                to_intel_plane_state(crtc->primary->state)->base.visible = false;
        }
 
-       dev_priv->display.crtc_disable(crtc);
+       state = drm_atomic_state_alloc(crtc->dev);
+       state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+
+       /* Everything's already locked, -EDEADLK can't happen. */
+       crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+       ret = drm_atomic_add_affected_connectors(state, crtc);
+
+       WARN_ON(IS_ERR(crtc_state) || ret);
+
+       dev_priv->display.crtc_disable(crtc_state, state);
+
+       drm_atomic_state_free(state);
 
        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                      crtc->base.id, crtc->name);
@@ -7273,9 +7395,10 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
 
 static int pnv_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        u16 gcfgc = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+       pci_read_config_word(pdev, GCFGC, &gcfgc);
 
        switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
        case GC_DISPLAY_CLOCK_267_MHZ_PNV:
@@ -7297,9 +7420,10 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
 
 static int i915gm_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        u16 gcfgc = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+       pci_read_config_word(pdev, GCFGC, &gcfgc);
 
        if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
                return 133333;
@@ -7321,6 +7445,7 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
 
 static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        u16 hpllcc = 0;
 
        /*
@@ -7328,10 +7453,10 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
         * encoding is different :(
         * FIXME is this the right way to detect 852GM/852GMV?
         */
-       if (dev->pdev->revision == 0x1)
+       if (pdev->revision == 0x1)
                return 133333;
 
-       pci_bus_read_config_word(dev->pdev->bus,
+       pci_bus_read_config_word(pdev->bus,
                                 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
 
        /* Assume that the hardware is in the high speed state.  This
@@ -7432,10 +7557,11 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
 
 static int gm45_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
        uint16_t tmp = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+       pci_read_config_word(pdev, GCFGC, &tmp);
 
        cdclk_sel = (tmp >> 12) & 0x1;
 
@@ -7454,6 +7580,7 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
 
 static int i965gm_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        static const uint8_t div_3200[] = { 16, 10,  8 };
        static const uint8_t div_4000[] = { 20, 12, 10 };
        static const uint8_t div_5333[] = { 24, 16, 14 };
@@ -7461,7 +7588,7 @@ static int i965gm_get_display_clock_speed(struct drm_device *dev)
        unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
        uint16_t tmp = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+       pci_read_config_word(pdev, GCFGC, &tmp);
 
        cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
 
@@ -7491,6 +7618,7 @@ fail:
 
 static int g33_get_display_clock_speed(struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
        static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
        static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
        static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -7499,7 +7627,7 @@ static int g33_get_display_clock_speed(struct drm_device *dev)
        unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
        uint16_t tmp = 0;
 
-       pci_read_config_word(dev->pdev, GCFGC, &tmp);
+       pci_read_config_word(pdev, GCFGC, &tmp);
 
        cdclk_sel = (tmp >> 4) & 0x7;
 
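
For context, every GCFGC-based hook touched above follows the same decode: read the config word through the newly cached pdev, pick a divider table by the HPLL VCO frequency, index it with the select field, and divide. A hedged sketch of that shape; the table values and helper name are illustrative only, and the real code chooses among several per-VCO tables:

    static int example_gcfgc_display_clock(struct pci_dev *pdev,
                                           unsigned int vco)
    {
            /* Illustrative dividers; the real tables are per-platform. */
            static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
            unsigned int cdclk_sel;
            uint16_t tmp = 0;

            pci_read_config_word(pdev, GCFGC, &tmp);

            /* g33 keeps the select field in bits 6:4 of GCFGC. */
            cdclk_sel = (tmp >> 4) & 0x7;
            if (cdclk_sel >= ARRAY_SIZE(div_3200))
                    return 0;

            return DIV_ROUND_CLOSEST(vco, div_3200[cdclk_sel]);
    }
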
@@ -9665,7 +9793,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
        return;
 
 error:
-       kfree(fb);
+       kfree(intel_fb);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -9910,7 +10038,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
                mutex_lock(&dev_priv->rps.hw_lock);
                if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
                                            val))
-                       DRM_ERROR("Failed to write to D_COMP\n");
+                       DRM_DEBUG_KMS("Failed to write to D_COMP\n");
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else {
                I915_WRITE(D_COMP_BDW, val);
@@ -10318,15 +10446,12 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
 
        switch (port) {
        case PORT_A:
-               pipe_config->ddi_pll_sel = SKL_DPLL0;
                id = DPLL_ID_SKL_DPLL0;
                break;
        case PORT_B:
-               pipe_config->ddi_pll_sel = SKL_DPLL1;
                id = DPLL_ID_SKL_DPLL1;
                break;
        case PORT_C:
-               pipe_config->ddi_pll_sel = SKL_DPLL2;
                id = DPLL_ID_SKL_DPLL2;
                break;
        default:
@@ -10345,25 +10470,10 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
        u32 temp;
 
        temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
-       pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
+       id = temp >> (port * 3 + 1);
 
-       switch (pipe_config->ddi_pll_sel) {
-       case SKL_DPLL0:
-               id = DPLL_ID_SKL_DPLL0;
-               break;
-       case SKL_DPLL1:
-               id = DPLL_ID_SKL_DPLL1;
-               break;
-       case SKL_DPLL2:
-               id = DPLL_ID_SKL_DPLL2;
-               break;
-       case SKL_DPLL3:
-               id = DPLL_ID_SKL_DPLL3;
-               break;
-       default:
-               MISSING_CASE(pipe_config->ddi_pll_sel);
+       if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
                return;
-       }
 
        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
@@ -10373,10 +10483,9 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                                struct intel_crtc_state *pipe_config)
 {
        enum intel_dpll_id id;
+       uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
-       pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
-       switch (pipe_config->ddi_pll_sel) {
+       switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
                id = DPLL_ID_WRPLL1;
                break;
@@ -10396,7 +10505,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
                id = DPLL_ID_LCPLL_2700;
                break;
        default:
-               MISSING_CASE(pipe_config->ddi_pll_sel);
+               MISSING_CASE(ddi_pll_sel);
                /* fall through */
        case PORT_CLK_SEL_NONE:
                return;
@@ -10690,9 +10799,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
        int pipe = intel_crtc->pipe;
        uint32_t cntl = 0;
 
+       if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
+               skl_write_cursor_wm(intel_crtc, wm);
+
        if (plane_state && plane_state->base.visible) {
                cntl = MCURSOR_GAMMA_ENABLE;
                switch (plane_state->base.crtc_w) {
@@ -11431,10 +11544,8 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       unsigned reset_counter;
 
-       reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-       if (crtc->reset_counter != reset_counter)
+       if (abort_flip_on_reset(crtc))
                return true;
 
        /*
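
abort_flip_on_reset() is introduced by this series and its body is not shown here; given the reset_count bookkeeping added further down, a hedged guess at its shape (an assumption, not copied from the patch):

    static bool example_abort_flip_on_reset(struct intel_crtc *crtc)
    {
            struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;

            /* A reset is pending, or one completed since the flip was queued. */
            return i915_reset_in_progress_or_wedged(error) ||
                   crtc->reset_count != i915_reset_count(error);
    }
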
@@ -11911,8 +12022,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 
        if (work->flip_queued_req)
                WARN_ON(i915_wait_request(work->flip_queued_req,
-                                         false, NULL,
-                                         NO_WAITBOOST));
+                                         0, NULL, NO_WAITBOOST));
 
        /* For framebuffer backed by dmabuf, wait for fence */
        resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -12094,17 +12204,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        crtc->primary->fb = fb;
        update_state_fb(crtc->primary);
 
-       intel_fbc_pre_update(intel_crtc, intel_crtc->config,
-                            to_intel_plane_state(primary->state));
-
        work->pending_flip_obj = i915_gem_object_get(obj);
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto cleanup;
 
-       intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-       if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+       intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
                ret = -EIO;
                goto cleanup;
        }
@@ -12142,6 +12249,17 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->gtt_offset += intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;
 
+       /*
+        * There's the potential that the next frame will not be compatible with
+        * FBC, so we want to call pre_update() before the actual page flip.
+        * The problem is that pre_update() caches some information about the fb
+        * object, so we want to do this only after the object is pinned. Let's
+        * be on the safe side and do this immediately before scheduling the
+        * flip.
+        */
+       intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+                            to_intel_plane_state(primary->state));
+
        if (mmio_flip) {
                INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
@@ -12155,7 +12273,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                        goto cleanup_unpin;
                }
 
-               ret = i915_gem_object_sync(obj, request);
+               ret = i915_gem_request_await_object(request, obj, false);
                if (ret)
                        goto cleanup_request;
 
@@ -12668,10 +12786,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
        if (IS_BROXTON(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+               DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
                              "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
                              "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
-                             pipe_config->ddi_pll_sel,
                              pipe_config->dpll_hw_state.ebb0,
                              pipe_config->dpll_hw_state.ebb4,
                              pipe_config->dpll_hw_state.pll0,
@@ -12684,15 +12801,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                              pipe_config->dpll_hw_state.pll10,
                              pipe_config->dpll_hw_state.pcsdw12);
        } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
+               DRM_DEBUG_KMS("dpll_hw_state: "
                              "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
-                             pipe_config->ddi_pll_sel,
                              pipe_config->dpll_hw_state.ctrl1,
                              pipe_config->dpll_hw_state.cfgcr1,
                              pipe_config->dpll_hw_state.cfgcr2);
        } else if (HAS_DDI(dev)) {
-               DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
-                             pipe_config->ddi_pll_sel,
+               DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
                              pipe_config->dpll_hw_state.wrpll,
                              pipe_config->dpll_hw_state.spll);
        } else {
@@ -12805,7 +12920,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        struct intel_crtc_scaler_state scaler_state;
        struct intel_dpll_hw_state dpll_hw_state;
        struct intel_shared_dpll *shared_dpll;
-       uint32_t ddi_pll_sel;
        bool force_thru;
 
        /* FIXME: before the switch to atomic started, a new pipe_config was
@@ -12817,7 +12931,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        scaler_state = crtc_state->scaler_state;
        shared_dpll = crtc_state->shared_dpll;
        dpll_hw_state = crtc_state->dpll_hw_state;
-       ddi_pll_sel = crtc_state->ddi_pll_sel;
        force_thru = crtc_state->pch_pfit.force_thru;
 
        memset(crtc_state, 0, sizeof *crtc_state);
@@ -12826,7 +12939,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
        crtc_state->scaler_state = scaler_state;
        crtc_state->shared_dpll = shared_dpll;
        crtc_state->dpll_hw_state = dpll_hw_state;
-       crtc_state->ddi_pll_sel = ddi_pll_sel;
        crtc_state->pch_pfit.force_thru = force_thru;
 }
 
@@ -12914,7 +13026,7 @@ encoder_retry:
 
                encoder = to_intel_encoder(connector_state->best_encoder);
 
-               if (!(encoder->compute_config(encoder, pipe_config))) {
+               if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
                        DRM_DEBUG_KMS("Encoder config failure\n");
                        goto fail;
                }
@@ -13002,12 +13114,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
        return false;
 }
 
-#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
-       list_for_each_entry((intel_crtc), \
-                           &(dev)->mode_config.crtc_list, \
-                           base.head) \
-               for_each_if (mask & (1 <<(intel_crtc)->pipe))
-
 static bool
 intel_compare_m_n(unsigned int m, unsigned int n,
                  unsigned int m2, unsigned int n2,
@@ -13255,8 +13361,6 @@ intel_pipe_config_compare(struct drm_device *dev,
 
        PIPE_CONF_CHECK_I(double_wide);
 
-       PIPE_CONF_CHECK_X(ddi_pll_sel);
-
        PIPE_CONF_CHECK_P(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -13338,16 +13442,23 @@ static void verify_wm_state(struct drm_crtc *crtc,
                          hw_entry->start, hw_entry->end);
        }
 
-       /* cursor */
-       hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-       sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
-
-       if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
-               DRM_ERROR("mismatch in DDB state pipe %c cursor "
-                         "(expected (%u,%u), found (%u,%u))\n",
-                         pipe_name(pipe),
-                         sw_entry->start, sw_entry->end,
-                         hw_entry->start, hw_entry->end);
+       /*
+        * cursor
+        * If the cursor plane isn't active, we may not have updated its ddb
+        * allocation. In that case, since the ddb allocation will be updated
+        * once the plane becomes visible, we can skip this check.
+        */
+       if (intel_crtc->cursor_addr) {
+               hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+               sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+               if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
+                       DRM_ERROR("mismatch in DDB state pipe %c cursor "
+                                 "(expected (%u,%u), found (%u,%u))\n",
+                                 pipe_name(pipe),
+                                 sw_entry->start, sw_entry->end,
+                                 hw_entry->start, hw_entry->end);
+               }
        }
 }
 
@@ -13963,7 +14074,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
                                continue;
 
                        ret = i915_wait_request(intel_plane_state->wait_req,
-                                               true, NULL, NULL);
+                                               I915_WAIT_INTERRUPTIBLE,
+                                               NULL, NULL);
                        if (ret) {
                                /* Any hang should be swallowed by the wait */
                                WARN_ON(ret == -EIO);
@@ -14053,6 +14165,111 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
        return false;
 }
 
+static void intel_update_crtc(struct drm_crtc *crtc,
+                             struct drm_atomic_state *state,
+                             struct drm_crtc_state *old_crtc_state,
+                             unsigned int *crtc_vblank_mask)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
+       bool modeset = needs_modeset(crtc->state);
+
+       if (modeset) {
+               update_scanline_offset(intel_crtc);
+               dev_priv->display.crtc_enable(pipe_config, state);
+       } else {
+               intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+       }
+
+       if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
+               intel_fbc_enable(
+                   intel_crtc, pipe_config,
+                   to_intel_plane_state(crtc->primary->state));
+       }
+
+       drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+
+       if (needs_vblank_wait(pipe_config))
+               *crtc_vblank_mask |= drm_crtc_mask(crtc);
+}
+
+static void intel_update_crtcs(struct drm_atomic_state *state,
+                              unsigned int *crtc_vblank_mask)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       int i;
+
+       for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+               if (!crtc->state->active)
+                       continue;
+
+               intel_update_crtc(crtc, state, old_crtc_state,
+                                 crtc_vblank_mask);
+       }
+}
+
+static void skl_update_crtcs(struct drm_atomic_state *state,
+                            unsigned int *crtc_vblank_mask)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+       struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+       unsigned int updated = 0;
+       bool progress;
+       enum pipe pipe;
+
+       /*
+        * Whenever the number of active pipes changes, we need to make sure we
+        * update the pipes in the right order so that their ddb allocations
+        * never overlap with each other in between CRTC updates. Otherwise we'll
+        * cause pipe underruns and other bad stuff.
+        */
+       do {
+               int i;
+               progress = false;
+
+               for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+                       bool vbl_wait = false;
+                       unsigned int cmask = drm_crtc_mask(crtc);
+                       pipe = to_intel_crtc(crtc)->pipe;
+
+                       if (updated & cmask || !crtc->state->active)
+                               continue;
+                       if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
+                                                       pipe))
+                               continue;
+
+                       updated |= cmask;
+
+                       /*
+                        * If this is an already active pipe, its DDB changed,
+                        * and this isn't the last pipe that needs updating,
+                        * then we need to wait for a vblank to pass for the
+                        * new ddb allocation to take effect.
+                        */
+                       if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+                           !crtc->state->active_changed &&
+                           intel_state->wm_results.dirty_pipes != updated)
+                               vbl_wait = true;
+
+                       intel_update_crtc(crtc, state, old_crtc_state,
+                                         crtc_vblank_mask);
+
+                       if (vbl_wait)
+                               intel_wait_for_vblank(dev, pipe);
+
+                       progress = true;
+               }
+       } while (progress);
+}
+
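
The retry loop above only commits a pipe once its new DDB range no longer collides with anything still programmed in hardware, and the outer do/while keeps sweeping until a full pass makes no progress. The conflict test it depends on is plain interval intersection; a minimal sketch with an illustrative name:

    /*
     * Two DDB entries [a_start, a_end) and [b_start, b_end) conflict
     * exactly when the half-open intervals intersect.
     */
    static bool example_ddb_entries_overlap(uint16_t a_start, uint16_t a_end,
                                            uint16_t b_start, uint16_t b_end)
    {
            return a_start < b_end && b_start < a_end;
    }
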
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -14076,7 +14293,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                        continue;
 
                ret = i915_wait_request(intel_plane_state->wait_req,
-                                       true, NULL, NULL);
+                                       0, NULL, NULL);
                /* EIO should be eaten, and we can't get interrupted in the
                 * worker, and blocking commits have waited already. */
                WARN_ON(ret);
@@ -14112,7 +14329,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
                if (old_crtc_state->active) {
                        intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
-                       dev_priv->display.crtc_disable(crtc);
+                       dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
                        intel_crtc->active = false;
                        intel_fbc_disable(intel_crtc);
                        intel_disable_shared_dpll(intel_crtc);
@@ -14141,20 +14358,19 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
                        dev_priv->display.modeset_commit_cdclk(state);
 
+               /*
+                * SKL workaround: bspec recommends we disable the SAGV when we
+                * have more than one pipe enabled.
+                */
+               if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+                       skl_disable_sagv(dev_priv);
+
                intel_modeset_verify_disabled(dev);
        }
 
-       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       /* Complete the events for pipes that have now been disabled */
        for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
                bool modeset = needs_modeset(crtc->state);
-               struct intel_crtc_state *pipe_config =
-                       to_intel_crtc_state(crtc->state);
-
-               if (modeset && crtc->state->active) {
-                       update_scanline_offset(to_intel_crtc(crtc));
-                       dev_priv->display.crtc_enable(crtc);
-               }
 
                /* Complete events for now-disabled pipes here. */
                if (modeset && !crtc->state->active && crtc->state->event) {
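
skl_can_enable_sagv() itself is not part of this hunk; a hedged sketch of the single-pipe rule the comment above quotes from the bspec. The real helper presumably applies further constraints, so treat this as the minimal form only:

    static bool example_can_enable_sagv(unsigned int active_pipe_mask)
    {
            /* SAGV is only considered safe with at most one pipe lit. */
            return hweight32(active_pipe_mask) <= 1;
    }
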
@@ -14164,21 +14380,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
                        crtc->state->event = NULL;
                }
-
-               if (!modeset)
-                       intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
-
-               if (crtc->state->active &&
-                   drm_atomic_get_existing_plane_state(state, crtc->primary))
-                       intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
-
-               if (crtc->state->active)
-                       drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
-               if (pipe_config->base.active && needs_vblank_wait(pipe_config))
-                       crtc_vblank_mask |= 1 << i;
        }
 
+       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+
        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
@@ -14214,6 +14420,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
        }
 
+       if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+           skl_can_enable_sagv(state))
+               skl_enable_sagv(dev_priv);
+
        drm_atomic_helper_commit_hw_done(state);
 
        if (intel_state->modeset)
@@ -14354,8 +14564,6 @@ out:
                drm_atomic_state_free(state);
 }
 
-#undef for_each_intel_crtc_masked
-
 /*
  * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
  *        drm_atomic_helper_legacy_gamma_set() directly.
@@ -14601,10 +14809,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *old_intel_state =
                to_intel_crtc_state(old_crtc_state);
        bool modeset = needs_modeset(crtc->state);
+       enum pipe pipe = intel_crtc->pipe;
 
        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(intel_crtc);
@@ -14619,8 +14829,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 
        if (to_intel_crtc_state(crtc->state)->update_pipe)
                intel_update_pipe_config(intel_crtc, old_intel_state);
-       else if (INTEL_INFO(dev)->gen >= 9)
+       else if (INTEL_GEN(dev_priv) >= 9) {
                skl_detach_scalers(intel_crtc);
+
+               I915_WRITE(PIPE_WM_LINETIME(pipe),
+                          dev_priv->wm.skl_hw.wm_linetime[pipe]);
+       }
 }
 
 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -15782,6 +15996,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
                        skl_modeset_calc_cdclk;
        }
 
+       if (dev_priv->info.gen >= 9)
+               dev_priv->display.update_crtcs = skl_update_crtcs;
+       else
+               dev_priv->display.update_crtcs = intel_update_crtcs;
+
        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
                dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -15983,15 +16202,16 @@ static void intel_init_quirks(struct drm_device *dev)
 static void i915_disable_vga(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        u8 sr1;
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
        /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
-       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
-       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_put(pdev, VGA_RSRC_LEGACY_IO);
        udelay(300);
 
        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
@@ -16273,15 +16493,15 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
        return false;
 }
 
-static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct intel_connector *connector;
 
        for_each_connector_on_encoder(dev, &encoder->base, connector)
-               return true;
+               return connector;
 
-       return false;
+       return NULL;
 }
 
 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
@@ -16386,7 +16606,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
        struct intel_connector *connector;
-       struct drm_device *dev = encoder->base.dev;
 
        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
@@ -16394,7 +16613,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
        bool has_active_crtc = encoder->base.crtc &&
                to_intel_crtc(encoder->base.crtc)->active;
 
-       if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
+       connector = intel_encoder_find_connector(encoder);
+       if (connector && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);
@@ -16403,12 +16623,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (encoder->base.crtc) {
+                       struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
-                       encoder->disable(encoder);
+                       encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                        if (encoder->post_disable)
-                               encoder->post_disable(encoder);
+                               encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
                }
                encoder->base.crtc = NULL;
 
@@ -16416,12 +16638,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */
-               for_each_intel_connector(dev, connector) {
-                       if (connector->encoder != encoder)
-                               continue;
-                       connector->base.dpms = DRM_MODE_DPMS_OFF;
-                       connector->base.encoder = NULL;
-               }
+
+               connector->base.dpms = DRM_MODE_DPMS_OFF;
+               connector->base.encoder = NULL;
        }
        /* Enabled encoders without active connectors will be fixed in
         * the crtc fixup. */
index 364db90..acd0c51 100644 (file)
@@ -190,6 +190,29 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
        return (max_link_clock * max_lanes * 8) / 10;
 }
 
+static int
+intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *encoder = &intel_dig_port->base;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       int max_dotclk = dev_priv->max_dotclk_freq;
+       int ds_max_dotclk;
+
+       int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+
+       if (type != DP_DS_PORT_TYPE_VGA)
+               return max_dotclk;
+
+       ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
+                                                   intel_dp->downstream_ports);
+
+       if (ds_max_dotclk != 0)
+               max_dotclk = min(max_dotclk, ds_max_dotclk);
+
+       return max_dotclk;
+}
+
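
Worked example (not from the patch): a DP-to-VGA branch device advertising a 180000 kHz downstream dotclock makes intel_dp_mode_valid() reject anything above 180 MHz even when dev_priv->max_dotclk_freq is higher, while a reported limit of 0 leaves the source limit untouched.
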
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
@@ -199,7 +222,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
-       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+       int max_dotclk;
+
+       max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
@@ -1243,7 +1268,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
 }
 
 static void
-intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+intel_dp_aux_init(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
@@ -1419,6 +1444,44 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
        DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
+static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+{
+       uint8_t rev;
+       int len;
+
+       if ((drm_debug & DRM_UT_KMS) == 0)
+               return;
+
+       if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+             DP_DWN_STRM_PORT_PRESENT))
+               return;
+
+       len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
+       if (len < 0)
+               return;
+
+       DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+}
+
+static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+{
+       uint8_t rev[2];
+       int len;
+
+       if ((drm_debug & DRM_UT_KMS) == 0)
+               return;
+
+       if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+             DP_DWN_STRM_PORT_PRESENT))
+               return;
+
+       len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
+       if (len < 0)
+               return;
+
+       DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+}
+
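
For reference, the two revision formats differ: the hardware revision packs major.minor into the nibbles of one byte (a DP_BRANCH_HW_REV of 0x12 prints as "sink hw revision: 1.2"), while the software revision spends a whole byte on each of major and minor.
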
 static int rate_to_index(int find, const int *rates)
 {
        int i = 0;
@@ -1461,9 +1524,24 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
        }
 }
 
+static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
+                               struct intel_crtc_state *pipe_config)
+{
+       int bpp, bpc;
+
+       bpp = pipe_config->pipe_bpp;
+       bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
+
+       if (bpc > 0)
+               bpp = min(bpp, 3*bpc);
+
+       return bpp;
+}
+
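
Worked example (not from the patch): with pipe_bpp = 30 and a downstream port reporting at most 8 bpc, the clamp above yields min(30, 3 * 8) = 24 bpp; a bpc of 0 (unknown) leaves pipe_bpp unchanged.
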
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
-                       struct intel_crtc_state *pipe_config)
+                       struct intel_crtc_state *pipe_config,
+                       struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1526,7 +1604,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
-       bpp = pipe_config->pipe_bpp;
+       bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
        if (is_edp(intel_dp)) {
 
                /* Get bpp from vbt only for panels that dont have bpp in edid */
@@ -1640,23 +1718,28 @@ found:
 }
 
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-                             const struct intel_crtc_state *pipe_config)
+                             int link_rate, uint8_t lane_count,
+                             bool link_mst)
 {
-       intel_dp->link_rate = pipe_config->port_clock;
-       intel_dp->lane_count = pipe_config->lane_count;
-       intel_dp->link_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
+       intel_dp->link_rate = link_rate;
+       intel_dp->lane_count = lane_count;
+       intel_dp->link_mst = link_mst;
 }
 
-static void intel_dp_prepare(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder,
+                            struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
-       intel_dp_set_link_params(intel_dp, crtc->config);
+       intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
+                                pipe_config->lane_count,
+                                intel_crtc_has_type(pipe_config,
+                                                    INTEL_OUTPUT_DP_MST));
 
        /*
         * There are four kinds of DP registers:
@@ -1682,7 +1765,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 
        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-       intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
+       intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
 
        /* Split out the IBX/CPU vs CPT settings */
 
@@ -1710,7 +1793,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
-                   !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
+                   !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -2249,10 +2332,10 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
 
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
+                               struct intel_crtc_state *pipe_config)
 {
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
        assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -2260,11 +2343,11 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
        assert_edp_pll_disabled(dev_priv);
 
        DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
-                     crtc->config->port_clock);
+                     pipe_config->port_clock);
 
        intel_dp->DP &= ~DP_PLL_FREQ_MASK;
 
-       if (crtc->config->port_clock == 162000)
+       if (pipe_config->port_clock == 162000)
                intel_dp->DP |= DP_PLL_FREQ_162MHZ;
        else
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
@@ -2473,16 +2556,17 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 }
 
-static void intel_disable_dp(struct intel_encoder *encoder)
+static void intel_disable_dp(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (crtc->config->has_audio)
+       if (old_crtc_state->has_audio)
                intel_audio_codec_disable(encoder);
 
-       if (HAS_PSR(dev) && !HAS_DDI(dev))
+       if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
                intel_psr_disable(intel_dp);
 
        /* Make sure the panel is off before trying to change the mode. But also
@@ -2493,11 +2577,13 @@ static void intel_disable_dp(struct intel_encoder *encoder)
        intel_edp_panel_off(intel_dp);
 
        /* disable the port before the pipe on g4x */
-       if (INTEL_INFO(dev)->gen < 5)
+       if (INTEL_GEN(dev_priv) < 5)
                intel_dp_link_down(intel_dp);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder)
+static void ilk_post_disable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
@@ -2509,14 +2595,18 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder)
                ironlake_edp_pll_off(intel_dp);
 }
 
-static void vlv_post_disable_dp(struct intel_encoder *encoder)
+static void vlv_post_disable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
        intel_dp_link_down(intel_dp);
 }
 
-static void chv_post_disable_dp(struct intel_encoder *encoder)
+static void chv_post_disable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
@@ -2542,6 +2632,10 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
 
+       if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+               DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
+                             dp_train_pat & DP_TRAINING_PATTERN_MASK);
+
        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));
 
@@ -2583,7 +2677,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
-                       DRM_ERROR("DP training pattern 3 not supported\n");
+                       DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }
@@ -2608,7 +2702,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                        if (IS_CHERRYVIEW(dev)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
-                               DRM_ERROR("DP training pattern 3 not supported\n");
+                               DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
@@ -2616,19 +2710,15 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
        }
 }
 
-static void intel_dp_enable_port(struct intel_dp *intel_dp)
+static void intel_dp_enable_port(struct intel_dp *intel_dp,
+                                struct intel_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc =
-               to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
        /* enable with pattern 1 (as per spec) */
-       _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
-                                DP_TRAINING_PATTERN_1);
 
-       I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-       POSTING_READ(intel_dp->output_reg);
+       intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
 
        /*
         * Magic for VLV/CHV. We _must_ first set up the register
@@ -2637,14 +2727,15 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
-       if (crtc->config->has_audio)
+       if (old_crtc_state->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
 }
 
-static void intel_enable_dp(struct intel_encoder *encoder)
+static void intel_enable_dp(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
@@ -2661,7 +2752,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                vlv_init_panel_power_sequencer(intel_dp);
 
-       intel_dp_enable_port(intel_dp);
+       intel_dp_enable_port(intel_dp, pipe_config);
 
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
@@ -2673,7 +2764,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
                unsigned int lane_mask = 0x0;
 
                if (IS_CHERRYVIEW(dev))
-                       lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+                       lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
 
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
@@ -2683,22 +2774,26 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
 
-       if (crtc->config->has_audio) {
+       if (pipe_config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(pipe));
                intel_audio_codec_enable(encoder);
        }
 }
 
-static void g4x_enable_dp(struct intel_encoder *encoder)
+static void g4x_enable_dp(struct intel_encoder *encoder,
+                         struct intel_crtc_state *pipe_config,
+                         struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-       intel_enable_dp(encoder);
+       intel_enable_dp(encoder, pipe_config);
        intel_edp_backlight_on(intel_dp);
 }
 
-static void vlv_enable_dp(struct intel_encoder *encoder)
+static void vlv_enable_dp(struct intel_encoder *encoder,
+                         struct intel_crtc_state *pipe_config,
+                         struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
@@ -2706,16 +2801,18 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
        intel_psr_enable(intel_dp);
 }
 
-static void g4x_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
-       intel_dp_prepare(encoder);
+       intel_dp_prepare(encoder, pipe_config);
 
        /* Only ilk+ has port A */
        if (port == PORT_A)
-               ironlake_edp_pll_on(intel_dp);
+               ironlake_edp_pll_on(intel_dp, pipe_config);
 }
 
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -2821,38 +2918,48 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
-static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        vlv_phy_pre_encoder_enable(encoder);
 
-       intel_enable_dp(encoder);
+       intel_enable_dp(encoder, pipe_config);
 }
 
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
-       intel_dp_prepare(encoder);
+       intel_dp_prepare(encoder, pipe_config);
 
        vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_pre_enable_dp(struct intel_encoder *encoder)
+static void chv_pre_enable_dp(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        chv_phy_pre_encoder_enable(encoder);
 
-       intel_enable_dp(encoder);
+       intel_enable_dp(encoder, pipe_config);
 
        /* Second common lane will stay alive on its own now */
        chv_phy_release_cl2_override(encoder);
 }
 
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
-       intel_dp_prepare(encoder);
+       intel_dp_prepare(encoder, pipe_config);
 
        chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        chv_phy_post_pll_disable(encoder);
 }
@@ -4171,7 +4278,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
  *
  * Return %true if @port is connected, %false otherwise.
  */
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
                                         struct intel_digital_port *port)
 {
        if (HAS_PCH_IBX(dev_priv))
@@ -4282,6 +4389,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
        intel_dp_probe_oui(intel_dp);
 
+       intel_dp_print_hw_revision(intel_dp);
+       intel_dp_print_sw_revision(intel_dp);
+
        intel_dp_configure_mst(intel_dp);
 
        if (intel_dp->is_mst) {
@@ -5022,7 +5132,8 @@ static void intel_dp_pps_init(struct drm_device *dev,
 
 /**
  * intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev: DRM device
+ * @dev_priv: i915 device
+ * @crtc_state: a pointer to the active intel_crtc_state
  * @refresh_rate: RR to be programmed
  *
  * This function gets called when refresh rate (RR) has to be changed from
@@ -5032,14 +5143,14 @@ static void intel_dp_pps_init(struct drm_device *dev,
  *
  * The caller of this function needs to take a lock on dev_priv->drrs.
  */
-static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
+                                   struct intel_crtc_state *crtc_state,
+                                   int refresh_rate)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_digital_port *dig_port = NULL;
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
-       struct intel_crtc_state *config = NULL;
-       struct intel_crtc *intel_crtc = NULL;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
        if (refresh_rate <= 0) {
@@ -5066,8 +5177,6 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                return;
        }
 
-       config = intel_crtc->config;
-
        if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
                DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
                return;
@@ -5083,12 +5192,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                return;
        }
 
-       if (!intel_crtc->active) {
+       if (!crtc_state->base.active) {
                DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
                return;
        }
 
-       if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+       if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
                switch (index) {
                case DRRS_HIGH_RR:
                        intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5100,18 +5209,18 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
                default:
                        DRM_ERROR("Unsupported refreshrate type\n");
                }
-       } else if (INTEL_INFO(dev)->gen > 6) {
-               i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
+       } else if (INTEL_GEN(dev_priv) > 6) {
+               i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
                u32 val;
 
                val = I915_READ(reg);
                if (index > DRRS_HIGH_RR) {
-                       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+                       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val |= PIPECONF_EDP_RR_MODE_SWITCH;
                } else {
-                       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+                       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
                        else
                                val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
@@ -5127,18 +5236,17 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 /**
  * intel_edp_drrs_enable - init drrs struct if supported
  * @intel_dp: DP struct
+ * @crtc_state: A pointer to the active crtc state.
  *
  * Initializes frontbuffer_bits and drrs.dp
  */
-void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+                          struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_crtc *crtc = dig_port->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       if (!intel_crtc->config->has_drrs) {
+       if (!crtc_state->has_drrs) {
                DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
                return;
        }
@@ -5160,17 +5268,16 @@ unlock:
 /**
  * intel_edp_drrs_disable - Disable DRRS
  * @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
  *
  */
-void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+                           struct intel_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_crtc *crtc = dig_port->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       if (!intel_crtc->config->has_drrs)
+       if (!old_crtc_state->has_drrs)
                return;
 
        mutex_lock(&dev_priv->drrs.mutex);
@@ -5180,9 +5287,8 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
        }
 
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       intel_dp->attached_connector->panel.
-                                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(dev_priv, old_crtc_state,
+                       intel_dp->attached_connector->panel.fixed_mode->vrefresh);
 
        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5211,10 +5317,12 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
        if (dev_priv->drrs.busy_frontbuffer_bits)
                goto unlock;
 
-       if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       intel_dp->attached_connector->panel.
-                                       downclock_mode->vrefresh);
+       if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
+               struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+
+               intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+                       intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+       }
 
 unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5255,9 +5363,8 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
 
        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       dev_priv->drrs.dp->attached_connector->panel.
-                                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+                       dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
        mutex_unlock(&dev_priv->drrs.mutex);
 }
@@ -5299,9 +5406,8 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
 
        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(&dev_priv->drm,
-                                       dev_priv->drrs.dp->attached_connector->panel.
-                                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+                               dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
        /*
         * flush also means no more activity hence schedule downclock, if all
@@ -5598,7 +5704,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
-       intel_dp_aux_init(intel_dp, intel_connector);
+       intel_dp_aux_init(intel_dp);
 
        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);
index 60fb39c..c438b02 100644 (file)
 
 #include "intel_drv.h"
 
+static void
+intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+       DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
+                     link_status[0], link_status[1], link_status[2],
+                     link_status[3], link_status[4], link_status[5]);
+}
+
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp,
                       const uint8_t link_status[DP_LINK_STATUS_SIZE])
@@ -103,13 +112,24 @@ intel_dp_update_link_train(struct intel_dp *intel_dp)
        return ret == intel_dp->lane_count;
 }
 
+static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
+{
+       int lane;
+
+       for (lane = 0; lane < intel_dp->lane_count; lane++)
+               if ((intel_dp->train_set[lane] &
+                    DP_TRAIN_MAX_SWING_REACHED) == 0)
+                       return false;
+
+       return true;
+}
+
 /* Enable corresponding port and start training pattern 1 */
-static void
+static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
-       int i;
        uint8_t voltage;
-       int voltage_tries, loop_tries;
+       int voltage_tries, max_vswing_tries;
        uint8_t link_config[2];
        uint8_t link_bw, rate_select;
 
@@ -125,6 +145,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
        if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
        if (intel_dp->num_sink_rates)
                drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
                                  &rate_select, 1);
@@ -140,60 +161,54 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                                       DP_TRAINING_PATTERN_1 |
                                       DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to enable link training\n");
-               return;
+               return false;
        }
 
-       voltage = 0xff;
-       voltage_tries = 0;
-       loop_tries = 0;
+       voltage_tries = 1;
+       max_vswing_tries = 0;
        for (;;) {
                uint8_t link_status[DP_LINK_STATUS_SIZE];
 
                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
-                       break;
+                       return false;
                }
 
                if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        DRM_DEBUG_KMS("clock recovery OK\n");
-                       break;
+                       return true;
                }
 
-               /* Check to see if we've tried the max voltage */
-               for (i = 0; i < intel_dp->lane_count; i++)
-                       if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-                               break;
-               if (i == intel_dp->lane_count) {
-                       ++loop_tries;
-                       if (loop_tries == 5) {
-                               DRM_ERROR("too many full retries, give up\n");
-                               break;
-                       }
-                       intel_dp_reset_link_train(intel_dp,
-                                                 DP_TRAINING_PATTERN_1 |
-                                                 DP_LINK_SCRAMBLING_DISABLE);
-                       voltage_tries = 0;
-                       continue;
+               if (voltage_tries == 5) {
+                       DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+                       return false;
+               }
+
+               if (max_vswing_tries == 1) {
+                       DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+                       return false;
                }
 
-               /* Check to see if we've tried the same voltage 5 times */
-               if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-                       ++voltage_tries;
-                       if (voltage_tries == 5) {
-                               DRM_ERROR("too many voltage retries, give up\n");
-                               break;
-                       }
-               } else
-                       voltage_tries = 0;
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
                /* Update training set as requested by target */
                intel_get_adjust_train(intel_dp, link_status);
                if (!intel_dp_update_link_train(intel_dp)) {
                        DRM_ERROR("failed to update link training\n");
-                       break;
+                       return false;
                }
+
+               if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+                   voltage)
+                       ++voltage_tries;
+               else
+                       voltage_tries = 1;
+
+               if (intel_dp_link_max_vswing_reached(intel_dp))
+                       ++max_vswing_tries;
        }
 }
 
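The rewritten clock-recovery loop drops the old reset-and-retry dance in favour of two explicit exit conditions: five consecutive attempts at the same voltage swing, or one further attempt after every lane reports DP_TRAIN_MAX_SWING_REACHED. A compile-only sketch of that policy, with a toy sink model standing in for the DPCD handshake:

#include <stdbool.h>
#include <stdint.h>

struct cr_sim {
	int polls_until_lock;	/* hypothetical sink behaviour */
	uint8_t vswing;		/* current voltage swing level, 0-3 */
	bool max_swing;		/* all lanes at maximum swing? */
};

static bool clock_recovery_ok(struct cr_sim *s)
{
	return s->polls_until_lock-- <= 0;
}

static void adjust_train(struct cr_sim *s)
{
	if (s->vswing < 3)
		s->vswing++;	/* sink keeps asking for more swing */
	else
		s->max_swing = true;
}

/* the termination policy of the loop above */
static bool clock_recovery_loop(struct cr_sim *s)
{
	int voltage_tries = 1, max_vswing_tries = 0;

	for (;;) {
		uint8_t voltage = s->vswing;

		if (clock_recovery_ok(s))
			return true;
		if (voltage_tries == 5 || max_vswing_tries == 1)
			return false;

		adjust_train(s);
		voltage_tries = (s->vswing == voltage) ?
				voltage_tries + 1 : 1;
		if (s->max_swing)
			max_vswing_tries++;
	}
}
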
@@ -229,12 +244,12 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
        return training_pattern;
 }
 
-static void
+static bool
 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
-       bool channel_eq = false;
-       int tries, cr_tries;
+       int tries;
        u32 training_pattern;
+       uint8_t link_status[DP_LINK_STATUS_SIZE];
 
        training_pattern = intel_dp_training_pattern(intel_dp);
 
@@ -243,19 +258,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                                     training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
-               return;
+               return false;
        }
 
-       tries = 0;
-       cr_tries = 0;
-       channel_eq = false;
-       for (;;) {
-               uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-               if (cr_tries > 5) {
-                       DRM_ERROR("failed to train DP, aborting\n");
-                       break;
-               }
+       intel_dp->channel_eq_status = false;
+       for (tries = 0; tries < 5; tries++) {
 
                drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -266,44 +273,38 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status,
                                              intel_dp->lane_count)) {
-                       intel_dp_link_training_clock_recovery(intel_dp);
-                       intel_dp_set_link_train(intel_dp,
-                                               training_pattern |
-                                               DP_LINK_SCRAMBLING_DISABLE);
-                       cr_tries++;
-                       continue;
+                       intel_dp_dump_link_status(link_status);
+                       DRM_DEBUG_KMS("Clock recovery check failed, cannot continue channel equalization\n");
+                       break;
                }
 
                if (drm_dp_channel_eq_ok(link_status,
                                         intel_dp->lane_count)) {
-                       channel_eq = true;
+                       intel_dp->channel_eq_status = true;
+                       DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
                        break;
                }
 
-               /* Try 5 times, then try clock recovery if that fails */
-               if (tries > 5) {
-                       intel_dp_link_training_clock_recovery(intel_dp);
-                       intel_dp_set_link_train(intel_dp,
-                                               training_pattern |
-                                               DP_LINK_SCRAMBLING_DISABLE);
-                       tries = 0;
-                       cr_tries++;
-                       continue;
-               }
-
                /* Update training set as requested by target */
                intel_get_adjust_train(intel_dp, link_status);
                if (!intel_dp_update_link_train(intel_dp)) {
                        DRM_ERROR("failed to update link training\n");
                        break;
                }
-               ++tries;
+       }
+
+       /* All 5 tries failed; the caller may retrain at a lower link rate */
+       if (tries == 5) {
+               intel_dp_dump_link_status(link_status);
+               DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
        }
 
        intel_dp_set_idle_link_train(intel_dp);
 
-       if (channel_eq)
-               DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+       return intel_dp->channel_eq_status;
 }
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
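
Channel equalization gets the same treatment: a hard bound of five adjust cycles, an early abort if clock recovery regresses (equalizing a lost clock is pointless), and the outcome latched in intel_dp->channel_eq_status so the caller can retrain at a lower link rate. In outline, with stubs standing in for the DPCD accessors:

#include <stdbool.h>

/* toy stand-ins for the DPCD status reads and the adjust write */
static bool clock_recovery_still_ok(void) { return true; }
static bool channel_eq_ok(void) { return false; }
static bool update_link_train(void) { return true; }

static bool channel_eq_loop(void)
{
	int tries;

	for (tries = 0; tries < 5; tries++) {
		if (!clock_recovery_still_ok())
			return false;	/* CR lost: bail out */
		if (channel_eq_ok())
			return true;	/* latched as channel_eq_status */
		if (!update_link_train())
			return false;
	}

	return false;	/* five failures: retry at a lower link rate */
}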
index 629337d..54a9d76 100644 (file)
 #include <drm/drm_edid.h>
 
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
-                                       struct intel_crtc_state *pipe_config)
+                                       struct intel_crtc_state *pipe_config,
+                                       struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_atomic_state *state;
-       int bpp, i;
+       int bpp;
        int lane_count, slots;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       struct drm_connector *drm_connector;
-       struct intel_connector *connector, *found = NULL;
-       struct drm_connector_state *connector_state;
        int mst_pbn;
 
        pipe_config->dp_encoder_is_mst = true;
@@ -54,7 +52,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
         */
        lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 
-
        pipe_config->lane_count = lane_count;
 
        pipe_config->pipe_bpp = 24;
@@ -62,20 +59,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
        state = pipe_config->base.state;
 
-       for_each_connector_in_state(state, drm_connector, connector_state, i) {
-               connector = to_intel_connector(drm_connector);
-
-               if (connector_state->best_encoder == &encoder->base) {
-                       found = connector;
-                       break;
-               }
-       }
-
-       if (!found) {
-               DRM_ERROR("can't find connector\n");
-               return false;
-       }
-
        mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
        pipe_config->pbn = mst_pbn;
@@ -92,16 +75,20 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 }
 
-static void intel_mst_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_disable_dp(struct intel_encoder *encoder,
+                                struct intel_crtc_state *old_crtc_state,
+                                struct drm_connector_state *old_conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_connector *connector =
+               to_intel_connector(old_conn_state->connector);
        int ret;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-       drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
+       drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
        if (ret) {
@@ -109,11 +96,15 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
        }
 }
 
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+                                     struct intel_crtc_state *old_crtc_state,
+                                     struct drm_connector_state *old_conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_connector *connector =
+               to_intel_connector(old_conn_state->connector);
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
@@ -122,59 +113,51 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
        /* and this can also fail */
        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
-       drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
+       drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
 
        intel_dp->active_mst_links--;
 
        intel_mst->connector = NULL;
        if (intel_dp->active_mst_links == 0) {
-               intel_dig_port->base.post_disable(&intel_dig_port->base);
+               intel_dig_port->base.post_disable(&intel_dig_port->base,
+                                                 NULL, NULL);
+
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        }
 }
 
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_dig_port->port;
+       struct intel_connector *connector =
+               to_intel_connector(conn_state->connector);
        int ret;
        uint32_t temp;
-       struct intel_connector *found = NULL, *connector;
        int slots;
-       struct drm_crtc *crtc = encoder->base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       for_each_intel_connector(dev, connector) {
-               if (connector->base.state->best_encoder == &encoder->base) {
-                       found = connector;
-                       break;
-               }
-       }
-
-       if (!found) {
-               DRM_ERROR("can't find connector\n");
-               return;
-       }
 
        /* MST encoders are bound to a crtc, not to a connector,
         * force the mapping here for get_hw_state.
         */
-       found->encoder = encoder;
+       connector->encoder = encoder;
+       intel_mst->connector = connector;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-       intel_mst->connector = found;
-
        if (intel_dp->active_mst_links == 0) {
-               intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
+               intel_ddi_clk_select(&intel_dig_port->base,
+                                    pipe_config->shared_dpll);
 
                intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
-
-               intel_dp_set_link_params(intel_dp, intel_crtc->config);
+               intel_dp_set_link_params(intel_dp,
+                                        pipe_config->port_clock,
+                                        pipe_config->lane_count,
+                                        true);
 
                intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
@@ -185,8 +168,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        }
 
        ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
-                                      intel_mst->connector->port,
-                                      intel_crtc->config->pbn, &slots);
+                                      connector->port,
+                                      pipe_config->pbn, &slots);
        if (ret == false) {
                DRM_ERROR("failed to allocate vcpi\n");
                return;
@@ -200,13 +183,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 }
 
-static void intel_mst_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_enable_dp(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_dig_port->port;
        int ret;
 
@@ -239,9 +223,8 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 {
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        u32 temp, flags = 0;
 
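The thread running through this file (and the encoder hunks that follow) is atomic-state plumbing: every encoder hook now receives the crtc state and connector state being committed, so compute_config and pre_enable no longer iterate all connectors hunting for their own, and no longer dereference encoder->base.crtc, which is ambiguous in the middle of an atomic modeset. The shape of the change, with simplified stand-in types:

/* stand-ins for intel_crtc_state / drm_connector_state */
struct crtc_state { int port_clock; int lane_count; };
struct conn_state { void *connector; };
struct encoder;

struct encoder_hooks {
	int  (*compute_config)(struct encoder *e, struct crtc_state *cs,
			       struct conn_state *conn);
	void (*pre_enable)(struct encoder *e, struct crtc_state *cs,
			   struct conn_state *conn);
	void (*post_disable)(struct encoder *e, struct crtc_state *old_cs,
			     struct conn_state *old_conn);
};
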
index 5c1f2d2..c26d18a 100644 (file)
 
 #include "intel_drv.h"
 
+struct intel_shared_dpll *
+skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
+{
+       struct intel_shared_dpll *pll = NULL;
+       struct intel_dpll_hw_state dpll_hw_state;
+       enum intel_dpll_id i;
+       bool found = false;
+
+       if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+               return pll;
+
+       for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
+               pll = &dev_priv->shared_dplls[i];
+
+               /* First pass: only consider PLLs that are already in use */
+               if (pll->config.crtc_mask == 0)
+                       continue;
+
+               if (memcmp(&dpll_hw_state, &pll->config.hw_state,
+                          sizeof(pll->config.hw_state)) == 0) {
+                       found = true;
+                       break;
+               }
+       }
+
+       /* No matching timings; fall back to a free PLL, if any */
+       for (i = DPLL_ID_SKL_DPLL1;
+            !found && i <= DPLL_ID_SKL_DPLL3; i++) {
+               pll = &dev_priv->shared_dplls[i];
+               if (pll->config.crtc_mask == 0) {
+                       pll->config.hw_state = dpll_hw_state;
+                       break;
+               }
+       }
+
+       return pll;
+}
+
 struct intel_shared_dpll *
 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
                            enum intel_dpll_id id)
@@ -452,26 +490,6 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
        return val & SPLL_PLL_ENABLE;
 }
 
-static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
-{
-       switch (pll->id) {
-       case DPLL_ID_WRPLL1:
-               return PORT_CLK_SEL_WRPLL1;
-       case DPLL_ID_WRPLL2:
-               return PORT_CLK_SEL_WRPLL2;
-       case DPLL_ID_SPLL:
-               return PORT_CLK_SEL_SPLL;
-       case DPLL_ID_LCPLL_810:
-               return PORT_CLK_SEL_LCPLL_810;
-       case DPLL_ID_LCPLL_1350:
-               return PORT_CLK_SEL_LCPLL_1350;
-       case DPLL_ID_LCPLL_2700:
-               return PORT_CLK_SEL_LCPLL_2700;
-       default:
-               return PORT_CLK_SEL_NONE;
-       }
-}
-
 #define LC_FREQ 2700
 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 
@@ -687,11 +705,65 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
        *r2_out = best.r2;
 }
 
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
+                                                      struct intel_crtc *crtc,
+                                                      struct intel_crtc_state *crtc_state)
+{
+       struct intel_shared_dpll *pll;
+       uint32_t val;
+       unsigned int p, n2, r2;
+
+       hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+       val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+             WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+             WRPLL_DIVIDER_POST(p);
+
+       crtc_state->dpll_hw_state.wrpll = val;
+
+       pll = intel_find_shared_dpll(crtc, crtc_state,
+                                    DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+       return pll;
+}
+
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+                                             int clock)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_shared_dpll *pll;
+       enum intel_dpll_id pll_id;
+
+       switch (clock / 2) {
+       case 81000:
+               pll_id = DPLL_ID_LCPLL_810;
+               break;
+       case 135000:
+               pll_id = DPLL_ID_LCPLL_1350;
+               break;
+       case 270000:
+               pll_id = DPLL_ID_LCPLL_2700;
+               break;
+       default:
+               DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+               return NULL;
+       }
+
+       pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+       return pll;
+}
+
 static struct intel_shared_dpll *
 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
             struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        int clock = crtc_state->port_clock;
 
@@ -699,41 +771,12 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
               sizeof(crtc_state->dpll_hw_state));
 
        if (encoder->type == INTEL_OUTPUT_HDMI) {
-               uint32_t val;
-               unsigned p, n2, r2;
-
-               hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-               val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
-                     WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-                     WRPLL_DIVIDER_POST(p);
-
-               crtc_state->dpll_hw_state.wrpll = val;
-
-               pll = intel_find_shared_dpll(crtc, crtc_state,
-                                            DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+               pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
 
        } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
-               enum intel_dpll_id pll_id;
-
-               switch (clock / 2) {
-               case 81000:
-                       pll_id = DPLL_ID_LCPLL_810;
-                       break;
-               case 135000:
-                       pll_id = DPLL_ID_LCPLL_1350;
-                       break;
-               case 270000:
-                       pll_id = DPLL_ID_LCPLL_2700;
-                       break;
-               default:
-                       DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
-                       return NULL;
-               }
-
-               pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+               pll = hsw_ddi_dp_get_dpll(encoder, clock);
 
        } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
                if (WARN_ON(crtc_state->port_clock / 2 != 135000))
@@ -751,14 +794,11 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        if (!pll)
                return NULL;
 
-       crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
-
        intel_reference_shared_dpll(pll, crtc_state);
 
        return pll;
 }
 
-
 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
        .enable = hsw_ddi_wrpll_enable,
        .disable = hsw_ddi_wrpll_disable,
@@ -1194,75 +1234,110 @@ skip_remaining_dividers:
        return true;
 }
 
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
+                                     struct intel_crtc_state *crtc_state,
+                                     int clock)
 {
-       struct intel_shared_dpll *pll;
        uint32_t ctrl1, cfgcr1, cfgcr2;
-       int clock = crtc_state->port_clock;
+       struct skl_wrpll_params wrpll_params = { 0, };
 
        /*
         * See comment in intel_dpll_hw_state to understand why we always use 0
         * as the DPLL id in this function.
         */
-
        ctrl1 = DPLL_CTRL1_OVERRIDE(0);
 
-       if (encoder->type == INTEL_OUTPUT_HDMI) {
-               struct skl_wrpll_params wrpll_params = { 0, };
+       ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-               ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+       if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+               return false;
 
-               if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
-                       return NULL;
+       cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+               DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+               wrpll_params.dco_integer;
+
+       cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+               DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+               DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+               DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+               wrpll_params.central_freq;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+       crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+       return true;
+}
 
-               cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
-                        DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
-                        wrpll_params.dco_integer;
 
-               cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-                        DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
-                        DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
-                        DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
-                        wrpll_params.central_freq;
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+                                 struct intel_dpll_hw_state *dpll_hw_state)
+{
+       uint32_t ctrl1;
+
+       /*
+        * See comment in intel_dpll_hw_state to understand why we always use 0
+        * as the DPLL id in this function.
+        */
+       ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+       switch (clock / 2) {
+       case 81000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+               break;
+       case 135000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+               break;
+       case 270000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+               break;
+               /* eDP 1.4 rates */
+       case 162000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+               break;
+       case 108000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+               break;
+       case 216000:
+               ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+               break;
+       }
+
+       dpll_hw_state->ctrl1 = ctrl1;
+       return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+            struct intel_encoder *encoder)
+{
+       struct intel_shared_dpll *pll;
+       int clock = crtc_state->port_clock;
+       bool bret;
+       struct intel_dpll_hw_state dpll_hw_state;
+
+       memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+       if (encoder->type == INTEL_OUTPUT_HDMI) {
+               bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+                       return NULL;
+               }
        } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
-               switch (crtc_state->port_clock / 2) {
-               case 81000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
-                       break;
-               case 135000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
-                       break;
-               case 270000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
-                       break;
-               /* eDP 1.4 rates */
-               case 162000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
-                       break;
-               case 108000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
-                       break;
-               case 216000:
-                       ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
-                       break;
+               bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+               if (!bret) {
+                       DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+                       return NULL;
                }
-
-               cfgcr1 = cfgcr2 = 0;
+               crtc_state->dpll_hw_state = dpll_hw_state;
        } else {
                return NULL;
        }
 
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.ctrl1 = ctrl1;
-       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-       crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
        if (encoder->type == INTEL_OUTPUT_EDP)
                pll = intel_find_shared_dpll(crtc, crtc_state,
                                             DPLL_ID_SKL_DPLL0,
@@ -1274,8 +1349,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        if (!pll)
                return NULL;
 
-       crtc_state->ddi_pll_sel = pll->id;
-
        intel_reference_shared_dpll(pll, crtc_state);
 
        return pll;
@@ -1484,6 +1557,8 @@ struct bxt_clk_div {
        uint32_t m2_frac;
        bool m2_frac_en;
        uint32_t n;
+
+       int vco;
 };
 
 /* pre-calculated values for DP linkrates */
@@ -1497,57 +1572,60 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
        {432000, 3, 1, 32, 1677722, 1, 1}
 };
 
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-            struct intel_encoder *encoder)
+static bool
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
+                         struct intel_crtc_state *crtc_state, int clock,
+                         struct bxt_clk_div *clk_div)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_shared_dpll *pll;
-       enum intel_dpll_id i;
-       struct intel_digital_port *intel_dig_port;
-       struct bxt_clk_div clk_div = {0};
-       int vco = 0;
-       uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-       uint32_t lanestagger;
-       int clock = crtc_state->port_clock;
+       struct dpll best_clock;
 
-       if (encoder->type == INTEL_OUTPUT_HDMI) {
-               struct dpll best_clock;
+       /* Calculate HDMI div */
+       /*
+        * FIXME: tie the following calculation into
+        * i9xx_crtc_compute_clock
+        */
+       if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+               DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+                                clock, pipe_name(intel_crtc->pipe));
+               return false;
+       }
 
-               /* Calculate HDMI div */
-               /*
-                * FIXME: tie the following calculation into
-                * i9xx_crtc_compute_clock
-                */
-               if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
-                       DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-                                        clock, pipe_name(crtc->pipe));
-                       return NULL;
-               }
+       clk_div->p1 = best_clock.p1;
+       clk_div->p2 = best_clock.p2;
+       WARN_ON(best_clock.m1 != 2);
+       clk_div->n = best_clock.n;
+       clk_div->m2_int = best_clock.m2 >> 22;
+       clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
+       clk_div->m2_frac_en = clk_div->m2_frac != 0;
 
-               clk_div.p1 = best_clock.p1;
-               clk_div.p2 = best_clock.p2;
-               WARN_ON(best_clock.m1 != 2);
-               clk_div.n = best_clock.n;
-               clk_div.m2_int = best_clock.m2 >> 22;
-               clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
-               clk_div.m2_frac_en = clk_div.m2_frac != 0;
+       clk_div->vco = best_clock.vco;
 
-               vco = best_clock.vco;
-       } else if (encoder->type == INTEL_OUTPUT_DP ||
-                  encoder->type == INTEL_OUTPUT_EDP) {
-               int i;
+       return true;
+}
 
-               clk_div = bxt_dp_clk_val[0];
-               for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
-                       if (bxt_dp_clk_val[i].clock == clock) {
-                               clk_div = bxt_dp_clk_val[i];
-                               break;
-                       }
+static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+{
+       int i;
+
+       *clk_div = bxt_dp_clk_val[0];
+       for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+               if (bxt_dp_clk_val[i].clock == clock) {
+                       *clk_div = bxt_dp_clk_val[i];
+                       break;
                }
-               vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
        }
 
+       clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
+}
+
+static bool bxt_ddi_set_dpll_hw_state(int clock,
+                                      struct bxt_clk_div *clk_div,
+                                      struct intel_dpll_hw_state *dpll_hw_state)
+{
+       int vco = clk_div->vco;
+       uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+       uint32_t lanestagger;
+
        if (vco >= 6200000 && vco <= 6700000) {
                prop_coef = 4;
                int_coef = 9;
@@ -1566,12 +1644,9 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                targ_cnt = 9;
        } else {
                DRM_ERROR("Invalid VCO\n");
-               return NULL;
+               return false;
        }
 
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
        if (clock > 270000)
                lanestagger = 0x18;
        else if (clock > 135000)
@@ -1583,35 +1658,75 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        else
                lanestagger = 0x02;
 
-       crtc_state->dpll_hw_state.ebb0 =
-               PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
-       crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
-       crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
-       crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+       dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+       dpll_hw_state->pll0 = clk_div->m2_int;
+       dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
+       dpll_hw_state->pll2 = clk_div->m2_frac;
 
-       if (clk_div.m2_frac_en)
-               crtc_state->dpll_hw_state.pll3 =
-                       PORT_PLL_M2_FRAC_ENABLE;
+       if (clk_div->m2_frac_en)
+               dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
 
-       crtc_state->dpll_hw_state.pll6 =
-               prop_coef | PORT_PLL_INT_COEFF(int_coef);
-       crtc_state->dpll_hw_state.pll6 |=
-               PORT_PLL_GAIN_CTL(gain_ctl);
+       dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
+       dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
 
-       crtc_state->dpll_hw_state.pll8 = targ_cnt;
+       dpll_hw_state->pll8 = targ_cnt;
 
-       crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+       dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-       crtc_state->dpll_hw_state.pll10 =
+       dpll_hw_state->pll10 =
                PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
                | PORT_PLL_DCO_AMP_OVR_EN_H;
 
-       crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+       dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
-       crtc_state->dpll_hw_state.pcsdw12 =
-               LANESTAGGER_STRAP_OVRD | lanestagger;
+       dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
 
-       intel_dig_port = enc_to_dig_port(&encoder->base);
+       return true;
+}
+
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+                                 struct intel_dpll_hw_state *dpll_hw_state)
+{
+       struct bxt_clk_div clk_div = {0};
+
+       bxt_ddi_dp_pll_dividers(clock, &clk_div);
+
+       return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc,
+               struct intel_crtc_state *crtc_state,
+               struct intel_encoder *encoder)
+{
+       struct bxt_clk_div clk_div = {0};
+       struct intel_dpll_hw_state dpll_hw_state = {0};
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       struct intel_digital_port *intel_dig_port;
+       struct intel_shared_dpll *pll;
+       int i, clock = crtc_state->port_clock;
+
+       if (encoder->type == INTEL_OUTPUT_HDMI &&
+           !bxt_ddi_hdmi_pll_dividers(crtc, crtc_state, clock, &clk_div))
+               return NULL;
+
+       if ((encoder->type == INTEL_OUTPUT_DP ||
+            encoder->type == INTEL_OUTPUT_EDP) &&
+           !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+               return NULL;
+
+       memset(&crtc_state->dpll_hw_state, 0,
+              sizeof(crtc_state->dpll_hw_state));
+
+       crtc_state->dpll_hw_state = dpll_hw_state;
+
+       if (encoder->type == INTEL_OUTPUT_DP_MST) {
+               struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+
+               intel_dig_port = intel_mst->primary;
+       } else {
+               intel_dig_port = enc_to_dig_port(&encoder->base);
+       }
 
        /* 1:1 mapping between ports and PLLs */
        i = (enum intel_dpll_id) intel_dig_port->port;
@@ -1622,9 +1737,6 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 
        intel_reference_shared_dpll(pll, crtc_state);
 
-       /* shared DPLL id 0 is DPLL A */
-       crtc_state->ddi_pll_sel = pll->id;
-
        return pll;
 }
 
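The refactor in this file pulls the divider and hw-state computation out of the *_get_dpll() entry points into clock-only helpers (hsw_ddi_dp_get_dpll(), skl_ddi_dp_set_dpll_hw_state(), bxt_ddi_dp_set_dpll_hw_state(), skl_find_link_pll()), so a link PLL can be chosen from a DP link rate alone, without a full crtc state; ddi_pll_sel goes away because crtc_state->shared_dpll already records the choice. At its core the HSW DP case is a pure rate-to-PLL mapping (enum names simplified here):

enum pll_id { LCPLL_810, LCPLL_1350, LCPLL_2700, PLL_INVALID };

/* port_clock is in kHz; each LCPLL tap serves exactly one DP rate */
static enum pll_id hsw_dp_pll_for_clock(int port_clock)
{
	switch (port_clock / 2) {
	case 81000:  return LCPLL_810;	/* RBR,  1.62 Gbps per lane */
	case 135000: return LCPLL_1350;	/* HBR,  2.70 Gbps per lane */
	case 270000: return LCPLL_2700;	/* HBR2, 5.40 Gbps per lane */
	default:     return PLL_INVALID;
	}
}
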
index 89c5ada..f438535 100644 (file)
@@ -160,5 +160,20 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc);
 void intel_shared_dpll_commit(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
 
+/* BXT dpll related functions */
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+                                 struct intel_dpll_hw_state *dpll_hw_state);
+
+/* SKL dpll related functions */
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+                                 struct intel_dpll_hw_state *dpll_hw_state);
+struct intel_shared_dpll *skl_find_link_pll(struct drm_i915_private *dev_priv,
+                                           int clock);
+
+/* HSW dpll related functions */
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+                                             int clock);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
index 774aab3..8fd16ad 100644 (file)
  */
 #define _wait_for(COND, US, W) ({ \
        unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;   \
-       int ret__ = 0;                                                  \
-       while (!(COND)) {                                               \
-               if (time_after(jiffies, timeout__)) {                   \
-                       if (!(COND))                                    \
-                               ret__ = -ETIMEDOUT;                     \
+       int ret__;                                                      \
+       for (;;) {                                                      \
+               bool expired__ = time_after(jiffies, timeout__);        \
+               if (COND) {                                             \
+                       ret__ = 0;                                      \
+                       break;                                          \
+               }                                                       \
+               if (expired__) {                                        \
+                       ret__ = -ETIMEDOUT;                             \
                        break;                                          \
                }                                                       \
                if ((W) && drm_can_sleep()) {                           \
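
The _wait_for() rework above fixes a classic polling race: expiry is now latched before the condition is sampled, so a condition that becomes true on the very last poll is reported as success instead of -ETIMEDOUT (the old version compensated by re-checking !(COND) after the deadline). The same ordering in portable C, as a hedged sketch:

#include <stdbool.h>
#include <time.h>

static double elapsed(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) + (b->tv_nsec - a->tv_nsec) / 1e9;
}

static bool poll_until(bool (*cond)(void), double timeout_s)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		bool expired;

		clock_gettime(CLOCK_MONOTONIC, &now);
		expired = elapsed(&start, &now) > timeout_s;
		if (cond())
			return true;	/* sampled after latching expiry */
		if (expired)
			return false;	/* -ETIMEDOUT in the macro */
	}
}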
@@ -205,14 +209,26 @@ struct intel_encoder {
        unsigned int cloneable;
        void (*hot_plug)(struct intel_encoder *);
        bool (*compute_config)(struct intel_encoder *,
-                              struct intel_crtc_state *);
-       void (*pre_pll_enable)(struct intel_encoder *);
-       void (*pre_enable)(struct intel_encoder *);
-       void (*enable)(struct intel_encoder *);
-       void (*mode_set)(struct intel_encoder *intel_encoder);
-       void (*disable)(struct intel_encoder *);
-       void (*post_disable)(struct intel_encoder *);
-       void (*post_pll_disable)(struct intel_encoder *);
+                              struct intel_crtc_state *,
+                              struct drm_connector_state *);
+       void (*pre_pll_enable)(struct intel_encoder *,
+                              struct intel_crtc_state *,
+                              struct drm_connector_state *);
+       void (*pre_enable)(struct intel_encoder *,
+                          struct intel_crtc_state *,
+                          struct drm_connector_state *);
+       void (*enable)(struct intel_encoder *,
+                      struct intel_crtc_state *,
+                      struct drm_connector_state *);
+       void (*disable)(struct intel_encoder *,
+                       struct intel_crtc_state *,
+                       struct drm_connector_state *);
+       void (*post_disable)(struct intel_encoder *,
+                            struct intel_crtc_state *,
+                            struct drm_connector_state *);
+       void (*post_pll_disable)(struct intel_encoder *,
+                                struct intel_crtc_state *,
+                                struct drm_connector_state *);
        /* Read out the current hw state of this connector, returning true if
         * the encoder is active. If the encoder is enabled it also sets the pipe
         * it is connected to in the pipe parameter. */
@@ -578,12 +594,6 @@ struct intel_crtc_state {
        /* Selected dpll when shared or NULL. */
        struct intel_shared_dpll *shared_dpll;
 
-       /*
-        * - PORT_CLK_SEL for DDI ports on HSW/BDW.
-        * - enum skl_dpll on SKL
-        */
-       uint32_t ddi_pll_sel;
-
        /* Actual register state of the dpll, for shared dpll cross-checking. */
        struct intel_dpll_hw_state dpll_hw_state;
 
@@ -700,8 +710,8 @@ struct intel_crtc {
 
        struct intel_crtc_state *config;
 
-       /* reset counter value when the last flip was submitted */
-       unsigned int reset_counter;
+       /* global reset count when the last flip was submitted */
+       unsigned int reset_count;
 
        /* Access to these should be protected by dev_priv->irq_lock. */
        bool cpu_fifo_underrun_disabled;
@@ -872,6 +882,7 @@ struct intel_dp {
        bool link_mst;
        bool has_audio;
        bool detect_done;
+       bool channel_eq_status;
        enum hdmi_force_audio force_audio;
        bool limited_color_range;
        bool color_range_auto;
@@ -1124,7 +1135,10 @@ void intel_crt_reset(struct drm_encoder *encoder);
 
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-                         const struct intel_crtc_state *pipe_config);
+                         struct intel_shared_dpll *pll);
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+                               struct intel_crtc_state *old_crtc_state,
+                               struct drm_connector_state *old_conn_state);
 void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
 void hsw_fdi_link_train(struct drm_crtc *crtc);
 void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -1140,7 +1154,6 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc,
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config);
 struct intel_encoder *
@@ -1151,7 +1164,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
                         struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
-
+struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
+                                                 int clock);
 unsigned int intel_fb_align_height(struct drm_device *dev,
                                   unsigned int height,
                                   uint32_t pixel_format,
@@ -1171,6 +1185,8 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
 void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
 extern const struct drm_plane_funcs intel_plane_funcs;
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
 unsigned int intel_fb_xy_to_linear(int x, int y,
@@ -1367,7 +1383,8 @@ bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                             struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-                             const struct intel_crtc_state *pipe_config);
+                             int link_rate, uint8_t lane_count,
+                             bool link_mst);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
@@ -1376,7 +1393,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
-                            struct intel_crtc_state *pipe_config);
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
                                  bool long_hpd);
@@ -1394,14 +1412,14 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+                          struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+                          struct intel_crtc_state *crtc_state);
 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
                               unsigned int frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-                                 struct intel_digital_port *port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1501,7 +1519,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                               struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *pipe_config);
+                              struct intel_crtc_state *pipe_config,
+                              struct drm_connector_state *conn_state);
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 
@@ -1722,6 +1741,21 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                          struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+                              const struct skl_ddb_allocation *new,
+                              enum pipe pipe);
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+                                const struct skl_ddb_allocation *old,
+                                const struct skl_ddb_allocation *new,
+                                enum pipe pipe);
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+                        const struct skl_wm_values *wm);
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+                       const struct skl_wm_values *wm,
+                       int plane);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
index de8e9fb..b2e3d3a 100644 (file)
@@ -312,7 +312,8 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 }
 
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
@@ -533,14 +534,15 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
        intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+                             struct intel_crtc_state *pipe_config);
 
-static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum port port;
 
        DRM_DEBUG_KMS("\n");
@@ -550,9 +552,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
         * lock. It needs to be fully powered down to fix it.
         */
        intel_disable_dsi_pll(encoder);
-       intel_enable_dsi_pll(encoder, crtc->config);
+       intel_enable_dsi_pll(encoder, pipe_config);
 
-       intel_dsi_prepare(encoder);
+       intel_dsi_prepare(encoder, pipe_config);
 
        /* Panel Enable over CRC PMIC */
        if (intel_dsi->gpio_panel)
@@ -582,7 +584,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
        intel_dsi_enable(encoder);
 }
 
-static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+static void intel_dsi_enable_nop(struct intel_encoder *encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
 {
        DRM_DEBUG_KMS("\n");
 
@@ -592,7 +596,9 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder)
         */
 }
 
-static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+static void intel_dsi_pre_disable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
@@ -694,7 +700,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
        intel_disable_dsi_pll(encoder);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_post_disable(struct intel_encoder *encoder,
+                                  struct intel_crtc_state *pipe_config,
+                                  struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -819,6 +827,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
        u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
                                crtc_hblank_start_sw, crtc_hblank_end_sw;
 
+       /* FIXME: hw readout should not depend on SW state */
        intel_crtc = to_intel_crtc(encoder->base.crtc);
        adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
 
@@ -1104,14 +1113,15 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
        }
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+                             struct intel_crtc_state *pipe_config)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-       const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        enum port port;
        unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
        u32 val, tmp;
@@ -1348,7 +1358,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
                intel_connector->panel.fitting_mode = val;
        }
 
-       crtc = intel_attached_encoder(connector)->base.crtc;
+       crtc = connector->state->crtc;
        if (crtc && crtc->state->enable) {
                /*
                 * If the CRTC is enabled, the display will be changed
index 47bdf9d..2e452c5 100644 (file)
@@ -174,7 +174,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
        pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
-static void intel_disable_dvo(struct intel_encoder *encoder)
+static void intel_disable_dvo(struct intel_encoder *encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -186,17 +188,18 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
        I915_READ(dvo_reg);
 }
 
-static void intel_enable_dvo(struct intel_encoder *encoder)
+static void intel_enable_dvo(struct intel_encoder *encoder,
+                            struct intel_crtc_state *pipe_config,
+                            struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
        u32 temp = I915_READ(dvo_reg);
 
        intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
-                                        &crtc->config->base.mode,
-                                        &crtc->config->base.adjusted_mode);
+                                        &pipe_config->base.mode,
+                                        &pipe_config->base.adjusted_mode);
 
        I915_WRITE(dvo_reg, temp | DVO_ENABLE);
        I915_READ(dvo_reg);
@@ -235,7 +238,8 @@ intel_dvo_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_dvo_compute_config(struct intel_encoder *encoder,
-                                    struct intel_crtc_state *pipe_config)
+                                    struct intel_crtc_state *pipe_config,
+                                    struct drm_connector_state *conn_state)
 {
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        const struct drm_display_mode *fixed_mode =
@@ -253,12 +257,13 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_dvo_pre_enable(struct intel_encoder *encoder)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+                                struct intel_crtc_state *pipe_config,
+                                struct drm_connector_state *conn_state)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        int pipe = crtc->pipe;
        u32 dvo_val;
@@ -554,7 +559,6 @@ void intel_dvo_init(struct drm_device *dev)
                return;
        }
 
-       drm_encoder_cleanup(&intel_encoder->base);
        kfree(intel_dvo);
        kfree(intel_connector);
 }
index 2e96a86..e405f10 100644 (file)
@@ -211,6 +211,8 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 {
        memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+       if (intel_engine_has_waiter(engine))
+               i915_queue_hangcheck(engine->i915);
 }
 
 static void intel_engine_init_requests(struct intel_engine_cs *engine)
@@ -230,7 +232,6 @@ static void intel_engine_init_requests(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-       INIT_LIST_HEAD(&engine->buffers);
        INIT_LIST_HEAD(&engine->execlist_queue);
        spin_lock_init(&engine->execlist_lock);
 
@@ -306,6 +307,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
        return 0;
 }
 
+void intel_engine_reset_irq(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (intel_engine_has_waiter(engine))
+               engine->irq_enable(engine);
+       else
+               engine->irq_disable(engine);
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
+
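
The new intel_engine_reset_irq() re-derives the hardware interrupt-enable state from the software waiter state, and does both checks under irq_lock so they cannot race. A minimal userspace sketch of that waiter-gated pattern, with invented names standing in for the i915 helpers:

    /* Hypothetical analogue of intel_engine_reset_irq(): the invariant
     * is that the "irq enabled" bit always matches whether a waiter
     * exists, and both are only inspected under one lock. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int waiter_count;   /* stands in for intel_engine_has_waiter() */
    static bool irq_enabled;   /* stands in for the hw irq enable state  */

    static void engine_reset_irq(void)
    {
            pthread_mutex_lock(&irq_lock);
            irq_enabled = waiter_count > 0; /* re-derive hw state from sw state */
            pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
            waiter_count = 1;
            engine_reset_irq();
            printf("irq enabled: %d\n", irq_enabled);
            return 0;
    }
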
 /**
  * intel_engines_cleanup_common - cleans up the engine state created by
 *                                the common initializers.
index bf8b22a..faa6762 100644 (file)
@@ -799,10 +799,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         */
        if (cache->fb.tiling_mode != I915_TILING_X ||
            cache->fb.fence_reg == I915_FENCE_REG_NONE) {
-               if (INTEL_GEN(dev_priv) < 5) {
-                       fbc->no_fbc_reason = "framebuffer not tiled or fenced";
-                       return false;
-               }
+               fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+               return false;
        }
        if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
            cache->plane.rotation != DRM_ROTATE_0) {
index 4003e49..b7098f9 100644 (file)
@@ -183,6 +183,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        struct intel_framebuffer *intel_fb = ifbdev->fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct fb_info *info;
        struct drm_framebuffer *fb;
@@ -280,7 +281,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        ifbdev->vma = vma;
 
        mutex_unlock(&dev->struct_mutex);
-       vga_switcheroo_client_fb_set(dev->pdev, info);
+       vga_switcheroo_client_fb_set(pdev, info);
        return 0;
 
 out_destroy_fbi:
index c973262..b1ba869 100644 (file)
@@ -78,9 +78,11 @@ struct i915_guc_client {
        uint16_t doorbell_id;
        uint16_t padding[3];            /* Maintain alignment           */
 
+       spinlock_t wq_lock;
        uint32_t wq_offset;
        uint32_t wq_size;
        uint32_t wq_tail;
+       uint32_t wq_rsvd;
        uint32_t no_wq_space;
        uint32_t b_fail;
        int retcode;
@@ -157,7 +159,7 @@ extern int intel_guc_resume(struct drm_device *dev);
 /* i915_guc_submission.c */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
+int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
index 324812d..6fd39ef 100644 (file)
@@ -97,7 +97,7 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
        }
 };
 
-static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        int irqs;
@@ -114,7 +114,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
        I915_WRITE(GUC_WD_VECS_IER, 0);
 }
 
-static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        int irqs;
@@ -134,13 +134,28 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
        I915_WRITE(GUC_WD_VECS_IER, ~irqs);
 
        /*
-        * If GuC has routed PM interrupts to itself, don't keep it.
-        * and keep other interrupts those are unmasked by GuC.
-       */
+        * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
+        * (unmasked) PM interrupts to the GuC. All other bits of this
+        * register *disable* generation of a specific interrupt.
+        *
+        * 'pm_intr_keep' indicates bits that are NOT to be set when
+        * writing to the PM interrupt mask register, i.e. interrupts
+        * that must not be disabled.
+        *
+        * If the GuC is handling these interrupts, then we must not let
+        * the PM code disable ANY interrupt that the GuC is expecting.
+        * So for each ENABLED (0) bit in this register, we must SET the
+        * bit in pm_intr_keep so that it's left enabled for the GuC.
+        *
+        * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
+        * (so interrupts go to the DISPLAY unit at first); but here we
+        * need to CLEAR that bit, which will result in the register bit
+        * being left SET!
+        */
        tmp = I915_READ(GEN6_PMINTRMSK);
-       if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
-               dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-               dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+       if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
+               dev_priv->rps.pm_intr_keep |= ~tmp;
+               dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
        }
 }
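
The rewritten comment is dense, so here is the same pm_intr_keep arithmetic as a standalone worked example. The mask value and bit position are fabricated; only the operations mirror the hunk above:

    /* In PMINTRMSK a set bit DISABLES an interrupt, so ~tmp is the set
     * of interrupts the GuC currently has enabled and must keep
     * receiving. REDIRECT_TO_GUC here is an illustrative stand-in for
     * GEN8_PMINTR_REDIRECT_TO_GUC. */
    #include <stdio.h>

    #define REDIRECT_TO_GUC (1u << 31)

    int main(void)
    {
            unsigned int tmp = REDIRECT_TO_GUC | 0xfffffff0; /* bits 0-3 enabled */
            unsigned int pm_intr_keep = REDIRECT_TO_GUC;     /* initial sw value */

            if (tmp & REDIRECT_TO_GUC) {
                    pm_intr_keep |= ~tmp;             /* never mask what GuC uses */
                    pm_intr_keep &= ~REDIRECT_TO_GUC; /* leave redirect bit set in hw */
            }

            printf("pm_intr_keep = %08x\n", pm_intr_keep); /* prints 0000000f */
            return 0;
    }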
 
@@ -152,17 +167,24 @@ static u32 get_gttype(struct drm_i915_private *dev_priv)
 
 static u32 get_core_family(struct drm_i915_private *dev_priv)
 {
-       switch (INTEL_INFO(dev_priv)->gen) {
+       u32 gen = INTEL_GEN(dev_priv);
+
+       switch (gen) {
        case 9:
                return GFXCORE_FAMILY_GEN9;
 
        default:
-               DRM_ERROR("GUC: unsupported core family\n");
+               WARN(1, "GEN%d does not support GuC operation!\n", gen);
                return GFXCORE_FAMILY_UNKNOWN;
        }
 }
 
-static void set_guc_init_params(struct drm_i915_private *dev_priv)
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+static void guc_params_init(struct drm_i915_private *dev_priv)
 {
        struct intel_guc *guc = &dev_priv->guc;
        u32 params[GUC_CTL_MAX_DWORDS];
@@ -375,11 +397,11 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
                I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
                                            I915_READ(GEN7_MISCCPCTL)));
 
-               /* allows for 5us before GT can go to RC6 */
+               /* allows for 5us (in 10ns units) before GT can go to RC6 */
                I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
        }
 
-       set_guc_init_params(dev_priv);
+       guc_params_init(dev_priv);
 
        ret = guc_ucode_xfer_dma(dev_priv, vma);
 
@@ -394,7 +416,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-static int i915_reset_guc(struct drm_i915_private *dev_priv)
+static int guc_hw_reset(struct drm_i915_private *dev_priv)
 {
        int ret;
        u32 guc_status;
@@ -447,7 +469,7 @@ int intel_guc_setup(struct drm_device *dev)
                goto fail;
        } else if (*fw_path == '\0') {
                /* Device has a GuC but we don't know what f/w to load? */
-               DRM_INFO("No GuC firmware known for this platform\n");
+               WARN(1, "No GuC firmware known for this platform!\n");
                err = -ENODEV;
                goto fail;
        }
@@ -461,7 +483,7 @@ int intel_guc_setup(struct drm_device *dev)
                goto fail;
        }
 
-       direct_interrupts_to_host(dev_priv);
+       guc_interrupts_release(dev_priv);
 
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
 
@@ -484,11 +506,9 @@ int intel_guc_setup(struct drm_device *dev)
                 * Always reset the GuC just before (re)loading, so
                 * that the state and timing are fairly predictable
                 */
-               err = i915_reset_guc(dev_priv);
-               if (err) {
-                       DRM_ERROR("GuC reset failed: %d\n", err);
+               err = guc_hw_reset(dev_priv);
+               if (err)
                        goto fail;
-               }
 
                err = guc_ucode_xfer(dev_priv);
                if (!err)
@@ -511,7 +531,7 @@ int intel_guc_setup(struct drm_device *dev)
                err = i915_guc_submission_enable(dev_priv);
                if (err)
                        goto fail;
-               direct_interrupts_to_guc(dev_priv);
+               guc_interrupts_capture(dev_priv);
        }
 
        return 0;
@@ -520,7 +540,7 @@ fail:
        if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
                guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
-       direct_interrupts_to_host(dev_priv);
+       guc_interrupts_release(dev_priv);
        i915_guc_submission_disable(dev_priv);
        i915_guc_submission_fini(dev_priv);
 
@@ -546,15 +566,15 @@ fail:
        else if (err == 0)
                DRM_INFO("GuC firmware load skipped\n");
        else if (ret != -EIO)
-               DRM_INFO("GuC firmware load failed: %d\n", err);
+               DRM_NOTE("GuC firmware load failed: %d\n", err);
        else
-               DRM_ERROR("GuC firmware load failed: %d\n", err);
+               DRM_WARN("GuC firmware load failed: %d\n", err);
 
        if (i915.enable_guc_submission) {
                if (fw_path == NULL)
                        DRM_INFO("GuC submission without firmware not supported\n");
                if (ret == 0)
-                       DRM_INFO("Falling back from GuC submission to execlist mode\n");
+                       DRM_NOTE("Falling back from GuC submission to execlist mode\n");
                else
                        DRM_ERROR("GuC init failed: %d\n", ret);
        }
@@ -565,6 +585,7 @@ fail:
 
 static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 {
+       struct pci_dev *pdev = dev->pdev;
        struct drm_i915_gem_object *obj;
        const struct firmware *fw;
        struct guc_css_header *css;
@@ -574,7 +595,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
        DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
                intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
 
-       err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
+       err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
        if (err)
                goto fail;
        if (!fw)
@@ -585,7 +606,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        /* Check the size of the blob before examining buffer contents */
        if (fw->size < sizeof(struct guc_css_header)) {
-               DRM_ERROR("Firmware header is missing\n");
+               DRM_NOTE("Firmware header is missing\n");
                goto fail;
        }
 
@@ -597,7 +618,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
                css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
 
        if (guc_fw->header_size != sizeof(struct guc_css_header)) {
-               DRM_ERROR("CSS header definition mismatch\n");
+               DRM_NOTE("CSS header definition mismatch\n");
                goto fail;
        }
 
@@ -607,7 +628,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        /* now RSA */
        if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
-               DRM_ERROR("RSA key size is bad\n");
+               DRM_NOTE("RSA key size is bad\n");
                goto fail;
        }
        guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
@@ -616,14 +637,14 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
        /* At least, it should have header, uCode and RSA. Size of all three. */
        size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
        if (fw->size < size) {
-               DRM_ERROR("Missing firmware components\n");
+               DRM_NOTE("Missing firmware components\n");
                goto fail;
        }
 
        /* Header and uCode will be loaded to WOPCM. Size of the two. */
        size = guc_fw->header_size + guc_fw->ucode_size;
        if (size > guc_wopcm_size(to_i915(dev))) {
-               DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+               DRM_NOTE("Firmware is too large to fit in WOPCM\n");
                goto fail;
        }
 
@@ -638,7 +659,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
            guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
-               DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
+               DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
                        guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
                        guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
                err = -ENOEXEC;
@@ -668,10 +689,10 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
        return;
 
 fail:
+       DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
+                guc_fw->guc_fw_path, err);
        DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
                err, fw, guc_fw->guc_fw_obj);
-       DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
-                 guc_fw->guc_fw_path, err);
 
        mutex_lock(&dev->struct_mutex);
        obj = guc_fw->guc_fw_obj;
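
The checks these DRM_NOTEs report on amount to a layout validation of the firmware blob. A reduced sketch of that validation, with hypothetical sizes in place of the parsed CSS header fields:

    /* Sketch only: the real offsets come from the CSS header parsed
     * out of the blob itself; sizes below are invented. */
    #include <stddef.h>
    #include <stdio.h>

    struct fw_layout {
            size_t header_size;
            size_t ucode_size;
            size_t rsa_size;
    };

    static int fw_layout_valid(const struct fw_layout *l, size_t blob_size,
                               size_t wopcm_size)
    {
            /* the blob must contain header + uCode + RSA signature */
            if (blob_size < l->header_size + l->ucode_size + l->rsa_size)
                    return 0;
            /* only header + uCode are loaded into WOPCM, so only they
             * are bounded by it */
            if (l->header_size + l->ucode_size > wopcm_size)
                    return 0;
            return 1;
    }

    int main(void)
    {
            struct fw_layout l = {
                    .header_size = 128, .ucode_size = 4096, .rsa_size = 256,
            };

            printf("valid: %d\n", fw_layout_valid(&l, 8192, 16384)); /* 1 */
            printf("valid: %d\n", fw_layout_valid(&l, 1024, 16384)); /* 0: truncated */
            return 0;
    }
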
@@ -752,7 +773,7 @@ void intel_guc_fini(struct drm_device *dev)
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
        mutex_lock(&dev->struct_mutex);
-       direct_interrupts_to_host(dev_priv);
+       guc_interrupts_release(dev_priv);
        i915_guc_submission_disable(dev_priv);
        i915_guc_submission_fini(dev_priv);
 
index 4df9f38..c51073f 100644 (file)
@@ -985,7 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
        intel_audio_codec_enable(encoder);
 }
 
-static void g4x_enable_hdmi(struct intel_encoder *encoder)
+static void g4x_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1006,7 +1008,9 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
                intel_enable_hdmi_audio(encoder);
 }
 
-static void ibx_enable_hdmi(struct intel_encoder *encoder)
+static void ibx_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1055,7 +1059,9 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
                intel_enable_hdmi_audio(encoder);
 }
 
-static void cpt_enable_hdmi(struct intel_encoder *encoder)
+static void cpt_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1108,11 +1114,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder)
                intel_enable_hdmi_audio(encoder);
 }
 
-static void vlv_enable_hdmi(struct intel_encoder *encoder)
+static void vlv_enable_hdmi(struct intel_encoder *encoder,
+                           struct intel_crtc_state *pipe_config,
+                           struct drm_connector_state *conn_state)
 {
 }
 
-static void intel_disable_hdmi(struct intel_encoder *encoder)
+static void intel_disable_hdmi(struct intel_encoder *encoder,
+                              struct intel_crtc_state *old_crtc_state,
+                              struct drm_connector_state *old_conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1164,17 +1174,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
        intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
-static void g4x_disable_hdmi(struct intel_encoder *encoder)
+static void g4x_disable_hdmi(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
        if (crtc->config->has_audio)
                intel_audio_codec_disable(encoder);
 
-       intel_disable_hdmi(encoder);
+       intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_hdmi(struct intel_encoder *encoder)
+static void pch_disable_hdmi(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
@@ -1182,9 +1196,11 @@ static void pch_disable_hdmi(struct intel_encoder *encoder)
                intel_audio_codec_disable(encoder);
 }
 
-static void pch_post_disable_hdmi(struct intel_encoder *encoder)
+static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
-       intel_disable_hdmi(encoder);
+       intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
 static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
@@ -1285,7 +1301,8 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *pipe_config)
+                              struct intel_crtc_state *pipe_config,
+                              struct drm_connector_state *conn_state)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
@@ -1422,24 +1439,22 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
 }
 
 static bool
-intel_hdmi_set_edid(struct drm_connector *connector, bool force)
+intel_hdmi_set_edid(struct drm_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-       struct edid *edid = NULL;
+       struct edid *edid;
        bool connected = false;
 
-       if (force) {
-               intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+       intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-               edid = drm_get_edid(connector,
-                                   intel_gmbus_get_adapter(dev_priv,
-                                   intel_hdmi->ddc_bus));
+       edid = drm_get_edid(connector,
+                           intel_gmbus_get_adapter(dev_priv,
+                           intel_hdmi->ddc_bus));
 
-               intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+       intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
 
-               intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
-       }
+       intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
        to_intel_connector(connector)->detect_edid = edid;
        if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1465,37 +1480,16 @@ static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
        enum drm_connector_status status;
-       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       bool live_status = false;
-       unsigned int try;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
        intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-       for (try = 0; !live_status && try < 9; try++) {
-               if (try)
-                       msleep(10);
-               live_status = intel_digital_port_connected(dev_priv,
-                               hdmi_to_dig_port(intel_hdmi));
-       }
-
-       if (!live_status) {
-               DRM_DEBUG_KMS("HDMI live status down\n");
-               /*
-                * Live status register is not reliable on all intel platforms.
-                * So consider live_status only for certain platforms, for
-                * others, read EDID to determine presence of sink.
-                */
-               if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
-                       live_status = true;
-       }
-
        intel_hdmi_unset_edid(connector);
 
-       if (intel_hdmi_set_edid(connector, live_status)) {
+       if (intel_hdmi_set_edid(connector)) {
                struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
                hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1521,7 +1515,7 @@ intel_hdmi_force(struct drm_connector *connector)
        if (connector->status != connector_status_connected)
                return;
 
-       intel_hdmi_set_edid(connector, true);
+       intel_hdmi_set_edid(connector);
        hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
 }
 
@@ -1638,7 +1632,9 @@ done:
        return 0;
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1651,7 +1647,9 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
                                   adjusted_mode);
 }
 
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1671,37 +1669,47 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
                                   intel_crtc->config->has_hdmi_sink,
                                   adjusted_mode);
 
-       g4x_enable_hdmi(encoder);
+       g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
 
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        intel_hdmi_prepare(encoder);
 
        vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+                                   struct intel_crtc_state *pipe_config,
+                                   struct drm_connector_state *conn_state)
 {
        intel_hdmi_prepare(encoder);
 
        chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+                                     struct intel_crtc_state *old_crtc_state,
+                                     struct drm_connector_state *old_conn_state)
 {
        chv_phy_post_pll_disable(encoder);
 }
 
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
        /* Reset lanes to avoid HDMI flicker (VLV w/a) */
        vlv_phy_reset_lanes(encoder);
 }
 
-static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1714,7 +1722,9 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1734,7 +1744,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
                                   intel_crtc->config->has_hdmi_sink,
                                   adjusted_mode);
 
-       g4x_enable_hdmi(encoder);
+       g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 
index 1f266d7..79aab9a 100644 (file)
@@ -255,67 +255,59 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
        algo->data = bus;
 }
 
-static int
-gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
-                    u32 gmbus2_status,
-                    u32 gmbus4_irq_en)
+static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
 {
-       int i;
-       u32 gmbus2 = 0;
        DEFINE_WAIT(wait);
-
-       if (!HAS_GMBUS_IRQ(dev_priv))
-               gmbus4_irq_en = 0;
+       u32 gmbus2;
+       int ret;
 
        /* Important: The hw handles only the first bit, so set only one! Since
         * we also need to check for NAKs besides the hw ready/idle signal, we
-        * need to wake up periodically and check that ourselves. */
-       I915_WRITE(GMBUS4, gmbus4_irq_en);
-
-       for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
-               prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
-                               TASK_UNINTERRUPTIBLE);
+        * need to wake up periodically and check that ourselves.
+        */
+       if (!HAS_GMBUS_IRQ(dev_priv))
+               irq_en = 0;
 
-               gmbus2 = I915_READ_NOTRACE(GMBUS2);
-               if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
-                       break;
+       add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+       I915_WRITE_FW(GMBUS4, irq_en);
 
-               schedule_timeout(1);
-       }
-       finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+       status |= GMBUS_SATOER;
+       ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+       if (ret)
+               ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
 
-       I915_WRITE(GMBUS4, 0);
+       I915_WRITE_FW(GMBUS4, 0);
+       remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
 
        if (gmbus2 & GMBUS_SATOER)
                return -ENXIO;
-       if (gmbus2 & gmbus2_status)
-               return 0;
-       return -ETIMEDOUT;
+
+       return ret;
 }
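
The new gmbus_wait() tries a 2us busy poll before falling back to a sleeping wait of up to 50ms, with the wait-queue entry registered up front so the GMBUS interrupt can wake it early. A self-contained model of that two-stage wait, where poll_cond() stands in for reading GMBUS2 and none of the names are i915 API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool poll_cond(void)
    {
            static int calls;
            return ++calls > 3;     /* pretend hw goes ready on the 4th read */
    }

    static int wait_two_stage(void)
    {
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
            int i;

            for (i = 0; i < 2; i++)         /* stage 1: cheap busy poll */
                    if (poll_cond())
                            return 0;

            for (i = 0; i < 50; i++) {      /* stage 2: sleep between polls */
                    if (poll_cond())
                            return 0;
                    nanosleep(&ts, NULL);
            }
            return -1;                      /* -ETIMEDOUT in the kernel */
    }

    int main(void)
    {
            printf("wait: %d\n", wait_two_stage());
            return 0;
    }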
 
 static int
 gmbus_wait_idle(struct drm_i915_private *dev_priv)
 {
+       DEFINE_WAIT(wait);
+       u32 irq_enable;
        int ret;
 
-       if (!HAS_GMBUS_IRQ(dev_priv))
-               return intel_wait_for_register(dev_priv,
-                                              GMBUS2, GMBUS_ACTIVE, 0,
-                                              10);
-
        /* Important: The hw handles only the first bit, so set only one! */
-       I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
+       irq_enable = 0;
+       if (HAS_GMBUS_IRQ(dev_priv))
+               irq_enable = GMBUS_IDLE_EN;
 
-       ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
-                                (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
-                                msecs_to_jiffies_timeout(10));
+       add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+       I915_WRITE_FW(GMBUS4, irq_enable);
 
-       I915_WRITE(GMBUS4, 0);
+       ret = intel_wait_for_register_fw(dev_priv,
+                                        GMBUS2, GMBUS_ACTIVE, 0,
+                                        10);
 
-       if (ret)
-               return 0;
-       else
-               return -ETIMEDOUT;
+       I915_WRITE_FW(GMBUS4, 0);
+       remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+
+       return ret;
 }
 
 static int
@@ -323,22 +315,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
                      unsigned short addr, u8 *buf, unsigned int len,
                      u32 gmbus1_index)
 {
-       I915_WRITE(GMBUS1,
-                  gmbus1_index |
-                  GMBUS_CYCLE_WAIT |
-                  (len << GMBUS_BYTE_COUNT_SHIFT) |
-                  (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-                  GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+       I915_WRITE_FW(GMBUS1,
+                     gmbus1_index |
+                     GMBUS_CYCLE_WAIT |
+                     (len << GMBUS_BYTE_COUNT_SHIFT) |
+                     (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                     GMBUS_SLAVE_READ | GMBUS_SW_RDY);
        while (len) {
                int ret;
                u32 val, loop = 0;
 
-               ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-                                          GMBUS_HW_RDY_EN);
+               ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
                if (ret)
                        return ret;
 
-               val = I915_READ(GMBUS3);
+               val = I915_READ_FW(GMBUS3);
                do {
                        *buf++ = val & 0xff;
                        val >>= 8;
@@ -385,12 +376,12 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
                len -= 1;
        }
 
-       I915_WRITE(GMBUS3, val);
-       I915_WRITE(GMBUS1,
-                  GMBUS_CYCLE_WAIT |
-                  (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
-                  (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-                  GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+       I915_WRITE_FW(GMBUS3, val);
+       I915_WRITE_FW(GMBUS1,
+                     GMBUS_CYCLE_WAIT |
+                     (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+                     (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+                     GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
        while (len) {
                int ret;
 
@@ -399,10 +390,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
                        val |= *buf++ << (8 * loop);
                } while (--len && ++loop < 4);
 
-               I915_WRITE(GMBUS3, val);
+               I915_WRITE_FW(GMBUS3, val);
 
-               ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-                                          GMBUS_HW_RDY_EN);
+               ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
                if (ret)
                        return ret;
        }
@@ -460,13 +450,13 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
 
        /* GMBUS5 holds 16-bit index */
        if (gmbus5)
-               I915_WRITE(GMBUS5, gmbus5);
+               I915_WRITE_FW(GMBUS5, gmbus5);
 
        ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
 
        /* Clear GMBUS5 after each index transfer */
        if (gmbus5)
-               I915_WRITE(GMBUS5, 0);
+               I915_WRITE_FW(GMBUS5, 0);
 
        return ret;
 }
@@ -478,11 +468,15 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
                                               struct intel_gmbus,
                                               adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
+       const unsigned int fw =
+               intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
+                                              FW_REG_READ | FW_REG_WRITE);
        int i = 0, inc, try = 0;
        int ret = 0;
 
+       intel_uncore_forcewake_get(dev_priv, fw);
 retry:
-       I915_WRITE(GMBUS0, bus->reg0);
+       I915_WRITE_FW(GMBUS0, bus->reg0);
 
        for (; i < num; i += inc) {
                inc = 1;
@@ -496,8 +490,8 @@ retry:
                }
 
                if (!ret)
-                       ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
-                                                  GMBUS_HW_WAIT_EN);
+                       ret = gmbus_wait(dev_priv,
+                                        GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
                if (ret == -ETIMEDOUT)
                        goto timeout;
                else if (ret)
@@ -508,7 +502,7 @@ retry:
         * a STOP on the very first cycle. To simplify the code we
         * unconditionally generate the STOP condition with an additional gmbus
         * cycle. */
-       I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+       I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
 
        /* Mark the GMBUS interface as disabled after waiting for idle.
         * We will re-enable it at the start of the next xfer,
@@ -519,7 +513,7 @@ retry:
                         adapter->name);
                ret = -ETIMEDOUT;
        }
-       I915_WRITE(GMBUS0, 0);
+       I915_WRITE_FW(GMBUS0, 0);
        ret = ret ?: i;
        goto out;
 
@@ -548,9 +542,9 @@ clear_err:
         * of resetting the GMBUS controller and so clearing the
         * BUS_ERROR raised by the slave's NAK.
         */
-       I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
-       I915_WRITE(GMBUS1, 0);
-       I915_WRITE(GMBUS0, 0);
+       I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
+       I915_WRITE_FW(GMBUS1, 0);
+       I915_WRITE_FW(GMBUS0, 0);
 
        DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
                         adapter->name, msgs[i].addr,
@@ -573,7 +567,7 @@ clear_err:
 timeout:
        DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
                      bus->adapter.name, bus->reg0 & 0xff);
-       I915_WRITE(GMBUS0, 0);
+       I915_WRITE_FW(GMBUS0, 0);
 
        /*
         * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -582,6 +576,7 @@ timeout:
        ret = -EAGAIN;
 
 out:
+       intel_uncore_forcewake_put(dev_priv, fw);
        return ret;
 }
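
With the switch to I915_READ_FW()/I915_WRITE_FW(), do_gmbus_xfer() now takes forcewake explicitly once per transfer and releases it at the shared out: label, so every success, error, and timeout path pays the acquire/release cost exactly once. A generic sketch of that goto-out bracketing idiom, with acquire()/release() as stand-ins:

    #include <stdio.h>

    static void acquire(void) { printf("acquire\n"); }
    static void release(void) { printf("release\n"); }

    static int do_transfer(int fail_early)
    {
            int ret = 0;

            acquire();                      /* paired with release() at out: */

            if (fail_early) {
                    ret = -1;
                    goto out;               /* error paths still release */
            }

            printf("transfer\n");
    out:
            release();
            return ret;
    }

    int main(void)
    {
            do_transfer(0);
            do_transfer(1);
            return 0;
    }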
 
@@ -633,6 +628,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
 int intel_setup_gmbus(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct intel_gmbus *bus;
        unsigned int pin;
        int ret;
@@ -663,7 +659,7 @@ int intel_setup_gmbus(struct drm_device *dev)
                         "i915 gmbus %s",
                         get_gmbus_pin(dev_priv, pin)->name);
 
-               bus->adapter.dev.parent = &dev->pdev->dev;
+               bus->adapter.dev.parent = &pdev->dev;
                bus->dev_priv = dev_priv;
 
                bus->adapter.algo = &gmbus_algorithm;
index 6b49df4..2511433 100644 (file)
 #define GEN8_CTX_STATUS_COMPLETE       (1 << 4)
 #define GEN8_CTX_STATUS_LITE_RESTORE   (1 << 15)
 
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+        (GEN8_CTX_STATUS_ACTIVE_IDLE | \
+         GEN8_CTX_STATUS_PREEMPTED | \
+         GEN8_CTX_STATUS_ELEMENT_SWITCH)
+
 #define CTX_LRI_HEADER_0               0x01
 #define CTX_CONTEXT_CONTROL            0x02
 #define CTX_RING_HEAD                  0x04
@@ -263,12 +268,10 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
 
-       if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
-               engine->idle_lite_restore_wa = ~0;
-
-       engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
-                                       IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
-                                       (engine->id == VCS || engine->id == VCS2);
+       engine->disable_lite_restore_wa =
+               (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+                IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+               (engine->id == VCS || engine->id == VCS2);
 
        engine->ctx_desc_template = GEN8_CTX_VALID;
        if (IS_GEN8(dev_priv))
@@ -328,34 +331,18 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
        return ctx->engine[engine->id].lrc_desc;
 }
 
-static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
-                                struct drm_i915_gem_request *rq1)
+static inline void
+execlists_context_status_change(struct drm_i915_gem_request *rq,
+                               unsigned long status)
 {
+       /*
+        * Currently only used when GVT-g is enabled. When GVT-g is
+        * disabled, the compiler should eliminate this function as
+        * dead code.
+        */
+       if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+               return;
 
-       struct intel_engine_cs *engine = rq0->engine;
-       struct drm_i915_private *dev_priv = rq0->i915;
-       uint64_t desc[2];
-
-       if (rq1) {
-               desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
-               rq1->elsp_submitted++;
-       } else {
-               desc[1] = 0;
-       }
-
-       desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
-       rq0->elsp_submitted++;
-
-       /* You must always write both descriptors in the order below. */
-       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
-       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
-
-       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
-       /* The context is automatically loaded after the following */
-       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
-
-       /* ELSP is a wo register, use another nearby reg for posting */
-       POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
+       atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
 }
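
The IS_ENABLED() early return is a compile-time constant, so in !CONFIG_DRM_I915_GVT builds the notifier call below it is dead code and disappears entirely. A standalone illustration of the idiom, using a simplified stand-in for the kernel's IS_ENABLED() macro:

    #include <stdio.h>

    #define MY_IS_ENABLED(x) (x)    /* simplified; not the kernel macro */
    #define CONFIG_FAKE_GVT 0       /* flip to 1 to compile the notifier in */

    static void notify(int status) { printf("notify %d\n", status); }

    static void status_change(int status)
    {
            if (!MY_IS_ENABLED(CONFIG_FAKE_GVT))
                    return;         /* constant-folded; notify() is eliminated */

            notify(status);
    }

    int main(void)
    {
            status_change(1);
            printf("done\n");
            return 0;
    }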
 
 static void
@@ -367,11 +354,11 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
        ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 }
 
-static void execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 {
-       struct intel_engine_cs *engine = rq->engine;
+       struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-       uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
+       u32 *reg_state = ce->lrc_reg_state;
 
        reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
 
@@ -382,158 +369,163 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
         */
        if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
                execlists_update_context_pdps(ppgtt, reg_state);
+
+       return ce->lrc_desc;
 }
 
-static void execlists_elsp_submit_contexts(struct drm_i915_gem_request *rq0,
-                                          struct drm_i915_gem_request *rq1)
+static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = rq0->i915;
-       unsigned int fw_domains = rq0->engine->fw_domains;
-
-       execlists_update_context(rq0);
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct execlist_port *port = engine->execlist_port;
+       u32 __iomem *elsp =
+               dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+       u64 desc[2];
 
-       if (rq1)
-               execlists_update_context(rq1);
+       if (!port[0].count)
+               execlists_context_status_change(port[0].request,
+                                               INTEL_CONTEXT_SCHEDULE_IN);
+       desc[0] = execlists_update_context(port[0].request);
+       engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
 
-       spin_lock_irq(&dev_priv->uncore.lock);
-       intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+       if (port[1].request) {
+               GEM_BUG_ON(port[1].count);
+               execlists_context_status_change(port[1].request,
+                                               INTEL_CONTEXT_SCHEDULE_IN);
+               desc[1] = execlists_update_context(port[1].request);
+               port[1].count = 1;
+       } else {
+               desc[1] = 0;
+       }
+       GEM_BUG_ON(desc[0] == desc[1]);
 
-       execlists_elsp_write(rq0, rq1);
+       /* You must always write both descriptors in the order below. */
+       writel(upper_32_bits(desc[1]), elsp);
+       writel(lower_32_bits(desc[1]), elsp);
 
-       intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
-       spin_unlock_irq(&dev_priv->uncore.lock);
+       writel(upper_32_bits(desc[0]), elsp);
+       /* The context is automatically loaded after the following */
+       writel(lower_32_bits(desc[0]), elsp);
 }
 
-static inline void execlists_context_status_change(
-               struct drm_i915_gem_request *rq,
-               unsigned long status)
+static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
 {
-       /*
-        * Only used when GVT-g is enabled now. When GVT-g is disabled,
-        * The compiler should eliminate this function as dead-code.
-        */
-       if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
-               return;
-
-       atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+       return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+               ctx->execlists_force_single_submission);
 }
 
-static void execlists_unqueue(struct intel_engine_cs *engine)
+static bool can_merge_ctx(const struct i915_gem_context *prev,
+                         const struct i915_gem_context *next)
 {
-       struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
-       struct drm_i915_gem_request *cursor, *tmp;
+       if (prev != next)
+               return false;
 
-       assert_spin_locked(&engine->execlist_lock);
+       if (ctx_single_port_submission(prev))
+               return false;
 
-       /*
-        * If irqs are not active generate a warning as batches that finish
-        * without the irqs may get lost and a GPU Hang may occur.
-        */
-       WARN_ON(!intel_irqs_enabled(engine->i915));
-
-       /* Try to read in pairs */
-       list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
-                                execlist_link) {
-               if (!req0) {
-                       req0 = cursor;
-               } else if (req0->ctx == cursor->ctx) {
-                       /* Same ctx: ignore first request, as second request
-                        * will update tail past first request's workload */
-                       cursor->elsp_submitted = req0->elsp_submitted;
-                       list_del(&req0->execlist_link);
-                       i915_gem_request_put(req0);
-                       req0 = cursor;
-               } else {
-                       if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
-                               /*
-                                * req0 (after merged) ctx requires single
-                                * submission, stop picking
-                                */
-                               if (req0->ctx->execlists_force_single_submission)
-                                       break;
-                               /*
-                                * req0 ctx doesn't require single submission,
-                                * but next req ctx requires, stop picking
-                                */
-                               if (cursor->ctx->execlists_force_single_submission)
-                                       break;
-                       }
-                       req1 = cursor;
-                       WARN_ON(req1->elsp_submitted);
-                       break;
-               }
-       }
+       return true;
+}
 
-       if (unlikely(!req0))
-               return;
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_request *cursor, *last;
+       struct execlist_port *port = engine->execlist_port;
+       bool submit = false;
+
+       last = port->request;
+       if (last)
+               /* WaIdleLiteRestore:bdw,skl
+                * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
+                * as we resubmit the request. See gen8_emit_request()
+                * for where we prepare the padding after the end of the
+                * request.
+                */
+               last->tail = last->wa_tail;
 
-       execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
+       GEM_BUG_ON(port[1].request);
 
-       if (req1)
-               execlists_context_status_change(req1,
-                                               INTEL_CONTEXT_SCHEDULE_IN);
+       /* Hardware submission is through 2 ports. Conceptually each port
+        * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+        * static for a context, and unique to each, so we only execute
+        * requests belonging to a single context from each ring. RING_HEAD
+        * is maintained by the CS in the context image, it marks the place
+        * where it got up to last time, and through RING_TAIL we tell the CS
+        * where we want to execute up to this time.
+        *
+        * In this list the requests are in order of execution. Consecutive
+        * requests from the same context are adjacent in the ringbuffer. We
+        * can combine these requests into a single RING_TAIL update:
+        *
+        *              RING_HEAD...req1...req2
+        *                                    ^- RING_TAIL
+        * since to execute req2 the CS must first execute req1.
+        *
+        * Our goal then is to point each port to the end of a consecutive
+        * sequence of requests as being the most optimal (fewest wake ups
+        * and context switches) submission.
+        */
 
-       if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
-               /*
-                * WaIdleLiteRestore: make sure we never cause a lite restore
-                * with HEAD==TAIL.
+       spin_lock(&engine->execlist_lock);
+       list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+               /* Can we combine this request with the current port? It has to
+                * be the same context/ringbuffer and not have any exceptions
+                * (e.g. GVT saying never to combine contexts).
                 *
-                * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
-                * resubmit the request. See gen8_emit_request() for where we
-                * prepare the padding after the end of the request.
+                * If we can combine the requests, we can execute both by
+                * updating the RING_TAIL to point to the end of the second
+                * request, and so we never need to tell the hardware about
+                * the first.
                 */
-               req0->tail += 8;
-               req0->tail &= req0->ring->size - 1;
+               if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
+                       /* If we are on the second port and cannot combine
+                        * this request with the last, then we are done.
+                        */
+                       if (port != engine->execlist_port)
+                               break;
+
+                       /* If GVT overrides us we only ever submit port[0],
+                        * leaving port[1] empty. Note that we also have
+                        * to be careful that we don't queue the same
+                        * context (even though a different request) to
+                        * the second port.
+                        */
+                       if (ctx_single_port_submission(cursor->ctx))
+                               break;
+
+                       GEM_BUG_ON(last->ctx == cursor->ctx);
+
+                       i915_gem_request_assign(&port->request, last);
+                       port++;
+               }
+               last = cursor;
+               submit = true;
+       }
+       if (submit) {
+               /* Decouple all the requests submitted from the queue */
+               engine->execlist_queue.next = &cursor->execlist_link;
+               cursor->execlist_link.prev = &engine->execlist_queue;
+
+               i915_gem_request_assign(&port->request, last);
        }
+       spin_unlock(&engine->execlist_lock);
 
-       execlists_elsp_submit_contexts(req0, req1);
+       if (submit)
+               execlists_submit_ports(engine);
 }
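
The dequeue policy in the comments above reduces to: walk the queue in execution order, fold runs of same-context requests into one port (only the final RING_TAIL matters), spill to the second port on a context switch, and stop once both ports are claimed. A self-contained model with invented types, ignoring the GVT single-submission exception:

    #include <stdio.h>

    struct request { int ctx; int tail; };

    int main(void)
    {
            const struct request queue[] = {
                    { .ctx = 1, .tail = 10 },
                    { .ctx = 1, .tail = 20 },  /* merges with previous */
                    { .ctx = 2, .tail = 30 },  /* context switch: port 1 */
                    { .ctx = 3, .tail = 40 },  /* both ports busy: stays queued */
            };
            const struct request *port[2] = { NULL, NULL };
            const struct request *last = NULL;
            unsigned int p = 0, i;

            for (i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
                    if (last && queue[i].ctx != last->ctx) {
                            if (p == 1)        /* second port taken: done */
                                    break;
                            port[p++] = last;  /* close out the first run */
                    }
                    last = &queue[i];          /* extend the current run */
            }
            port[p] = last;

            for (p = 0; p < 2; p++)
                    if (port[p])
                            printf("port[%u]: ctx %d tail %d\n",
                                   p, port[p]->ctx, port[p]->tail);
            return 0;
    }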
 
-static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
+static bool execlists_elsp_idle(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *head_req;
-
-       assert_spin_locked(&engine->execlist_lock);
-
-       head_req = list_first_entry_or_null(&engine->execlist_queue,
-                                           struct drm_i915_gem_request,
-                                           execlist_link);
-
-       if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
-               return 0;
-
-       WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
-
-       if (--head_req->elsp_submitted > 0)
-               return 0;
-
-       execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
-
-       list_del(&head_req->execlist_link);
-       i915_gem_request_put(head_req);
-
-       return 1;
+       return !engine->execlist_port[0].request;
 }
 
-static u32
-get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
-                  u32 *context_id)
+static bool execlists_elsp_ready(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-       u32 status;
-
-       read_pointer %= GEN8_CSB_ENTRIES;
-
-       status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
-
-       if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
-               return 0;
+       int port;
 
-       *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
-                                                             read_pointer));
+       port = 1; /* wait for a free slot */
+       if (engine->disable_lite_restore_wa || engine->preempt_wa)
+               port = 0; /* wait for GPU to be idle before continuing */
 
-       return status;
+       return !engine->execlist_port[port].request;
 }
 
 /*
@@ -543,103 +535,70 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 static void intel_lrc_irq_handler(unsigned long data)
 {
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+       struct execlist_port *port = engine->execlist_port;
        struct drm_i915_private *dev_priv = engine->i915;
-       u32 status_pointer;
-       unsigned int read_pointer, write_pointer;
-       u32 csb[GEN8_CSB_ENTRIES][2];
-       unsigned int csb_read = 0, i;
-       unsigned int submit_contexts = 0;
 
        intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
-       status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
-
-       read_pointer = engine->next_context_status_buffer;
-       write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
-       if (read_pointer > write_pointer)
-               write_pointer += GEN8_CSB_ENTRIES;
-
-       while (read_pointer < write_pointer) {
-               if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
-                       break;
-               csb[csb_read][0] = get_context_status(engine, ++read_pointer,
-                                                     &csb[csb_read][1]);
-               csb_read++;
-       }
-
-       engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
-       /* Update the read pointer to the old write pointer. Manual ringbuffer
-        * management ftw </sarcasm> */
-       I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
-                     _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-                                   engine->next_context_status_buffer << 8));
-
-       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
-
-       spin_lock(&engine->execlist_lock);
+       if (!execlists_elsp_idle(engine)) {
+               u32 __iomem *csb_mmio =
+                       dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+               u32 __iomem *buf =
+                       dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
+               unsigned int csb, head, tail;
+
+               csb = readl(csb_mmio);
+               head = GEN8_CSB_READ_PTR(csb);
+               tail = GEN8_CSB_WRITE_PTR(csb);
+               if (tail < head)
+                       tail += GEN8_CSB_ENTRIES;
+               while (head < tail) {
+                       unsigned int idx = ++head % GEN8_CSB_ENTRIES;
+                       unsigned int status = readl(buf + 2 * idx);
+
+                       if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+                               continue;
+
+                       GEM_BUG_ON(port[0].count == 0);
+                       if (--port[0].count == 0) {
+                               GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+                               execlists_context_status_change(port[0].request,
+                                                               INTEL_CONTEXT_SCHEDULE_OUT);
+
+                               i915_gem_request_put(port[0].request);
+                               port[0] = port[1];
+                               memset(&port[1], 0, sizeof(port[1]));
+
+                               engine->preempt_wa = false;
+                       }
 
-       for (i = 0; i < csb_read; i++) {
-               if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
-                       if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
-                               if (execlists_check_remove_request(engine, csb[i][1]))
-                                       WARN(1, "Lite Restored request removed from queue\n");
-                       } else
-                               WARN(1, "Preemption without Lite Restore\n");
+                       GEM_BUG_ON(port[0].count == 0 &&
+                                  !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
                }
 
-               if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
-                   GEN8_CTX_STATUS_ELEMENT_SWITCH))
-                       submit_contexts +=
-                               execlists_check_remove_request(engine, csb[i][1]);
-       }
-
-       if (submit_contexts) {
-               if (!engine->disable_lite_restore_wa ||
-                   (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
-                       execlists_unqueue(engine);
+               writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+                                    GEN8_CSB_WRITE_PTR(csb) << 8),
+                      csb_mmio);
        }
 
-       spin_unlock(&engine->execlist_lock);
+       if (execlists_elsp_ready(engine))
+               execlists_dequeue(engine);
 
-       if (unlikely(submit_contexts > 2))
-               DRM_ERROR("More than two context complete events?\n");
+       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 }
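
The CSB walk in the handler above is a plain ring-buffer drain: unwrap the write pointer once, then visit each new slot exactly once. A self-contained sketch of just that pointer arithmetic (assuming six entries, the GEN8_CSB_ENTRIES value on these parts):

    #define CSB_ENTRIES 6   /* assumed value of GEN8_CSB_ENTRIES */

    /* Returns how many CSB slots [head+1 .. tail] the loop visits. */
    static unsigned int csb_slots_to_read(unsigned int head, unsigned int tail)
    {
            unsigned int n = 0;

            if (tail < head)
                    tail += CSB_ENTRIES;    /* unwrap the write pointer */
            while (head < tail) {
                    unsigned int idx = ++head % CSB_ENTRIES;
                    (void)idx;              /* index of the next entry */
                    n++;
            }
            return n;
    }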
 
 static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-       struct drm_i915_gem_request *cursor;
-       int num_elements = 0;
-
-       spin_lock_bh(&engine->execlist_lock);
+       unsigned long flags;
 
-       list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
-               if (++num_elements > 2)
-                       break;
+       spin_lock_irqsave(&engine->execlist_lock, flags);
 
-       if (num_elements > 2) {
-               struct drm_i915_gem_request *tail_req;
-
-               tail_req = list_last_entry(&engine->execlist_queue,
-                                          struct drm_i915_gem_request,
-                                          execlist_link);
-
-               if (request->ctx == tail_req->ctx) {
-                       WARN(tail_req->elsp_submitted != 0,
-                               "More than 2 already-submitted reqs queued\n");
-                       list_del(&tail_req->execlist_link);
-                       i915_gem_request_put(tail_req);
-               }
-       }
-
-       i915_gem_request_get(request);
        list_add_tail(&request->execlist_link, &engine->execlist_queue);
-       request->ctx_hw_id = request->ctx->hw_id;
-       if (num_elements == 0)
-               execlists_unqueue(engine);
+       if (execlists_elsp_idle(engine))
+               tasklet_hi_schedule(&engine->irq_tasklet);
 
-       spin_unlock_bh(&engine->execlist_lock);
+       spin_unlock_irqrestore(&engine->execlist_lock, flags);
 }
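
Reduced to user-space stand-ins (a pthread mutex for the irq-safe execlist_lock, a flag for tasklet_hi_schedule(); all names hypothetical), the submit path's shape is: append under the lock, and kick the dequeue tasklet only when the ports are idle, because otherwise the completion interrupt for the in-flight request will run the next dequeue anyway.

    #include <pthread.h>
    #include <stdbool.h>

    struct fake_engine {
            pthread_mutex_t lock;   /* stands in for execlist_lock + irqsave */
            void *port0;            /* in-flight request, NULL when ELSP idle */
            void *queue[16];        /* bounds checks elided; sketch only */
            int queued;
            bool tasklet_kicked;    /* stands in for tasklet_hi_schedule() */
    };

    static void fake_submit(struct fake_engine *e, void *req)
    {
            pthread_mutex_lock(&e->lock);
            e->queue[e->queued++] = req;
            if (e->port0 == NULL)           /* idle: no irq will dequeue */
                    e->tasklet_kicked = true;
            pthread_mutex_unlock(&e->lock);
    }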
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -668,7 +627,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
                 * going any further, as the i915_add_request() call
                 * later on mustn't fail ...
                 */
-               ret = i915_guc_wq_check_space(request);
+               ret = i915_guc_wq_reserve(request);
                if (ret)
                        return ret;
        }
@@ -731,6 +690,7 @@ intel_logical_ring_advance(struct drm_i915_gem_request *request)
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
+       request->wa_tail = ring->tail;
 
        /* We keep the previous context alive until we retire the following
         * request. This ensures that the context object is still pinned
@@ -743,23 +703,6 @@ intel_logical_ring_advance(struct drm_i915_gem_request *request)
        return 0;
 }
 
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
-{
-       struct drm_i915_gem_request *req, *tmp;
-       LIST_HEAD(cancel_list);
-
-       WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
-
-       spin_lock_bh(&engine->execlist_lock);
-       list_replace_init(&engine->execlist_queue, &cancel_list);
-       spin_unlock_bh(&engine->execlist_lock);
-
-       list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
-               list_del(&req->execlist_link);
-               i915_gem_request_put(req);
-       }
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine)
 {
@@ -1280,48 +1223,30 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       unsigned int next_context_status_buffer_hw;
+       int ret;
+
+       ret = intel_mocs_init_engine(engine);
+       if (ret)
+               return ret;
 
        lrc_init_hws(engine);
 
-       I915_WRITE_IMR(engine,
-                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
+       intel_engine_reset_irq(engine);
+
        I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
        I915_WRITE(RING_MODE_GEN7(engine),
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-       POSTING_READ(RING_MODE_GEN7(engine));
-
-       /*
-        * Instead of resetting the Context Status Buffer (CSB) read pointer to
-        * zero, we need to read the write pointer from hardware and use its
-        * value because "this register is power context save restored".
-        * Effectively, these states have been observed:
-        *
-        *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
-        * BDW  | CSB regs not reset       | CSB regs reset       |
-        * CHT  | CSB regs not reset       | CSB regs not reset   |
-        * SKL  |         ?                |         ?            |
-        * BXT  |         ?                |         ?            |
-        */
-       next_context_status_buffer_hw =
-               GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
-
-       /*
-        * When the CSB registers are reset (also after power-up / gpu reset),
-        * CSB write pointer is set to all 1's, which is not valid, use '5' in
-        * this special case, so the first element read is CSB[0].
-        */
-       if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
-               next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 
-       engine->next_context_status_buffer = next_context_status_buffer_hw;
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
        intel_engine_init_hangcheck(engine);
 
-       return intel_mocs_init_engine(engine);
+       if (!execlists_elsp_idle(engine))
+               execlists_submit_ports(engine);
+
+       return 0;
 }
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
@@ -1357,6 +1282,36 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
        return init_workarounds_ring(engine);
 }
 
+static void reset_common_ring(struct intel_engine_cs *engine,
+                             struct drm_i915_gem_request *request)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct execlist_port *port = engine->execlist_port;
+       struct intel_context *ce = &request->ctx->engine[engine->id];
+
+       /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
+       ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+       request->ring->head = request->postfix;
+       request->ring->last_retired_head = -1;
+       intel_ring_update_space(request->ring);
+
+       if (i915.enable_guc_submission)
+               return;
+
+       /* Catch up with any missed context-switch interrupts */
+       I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
+       if (request->ctx != port[0].request->ctx) {
+               i915_gem_request_put(port[0].request);
+               port[0] = port[1];
+               memset(&port[1], 0, sizeof(port[1]));
+       }
+
+       /* CS is stopped, and we will resubmit both ports on resume */
+       GEM_BUG_ON(request->ctx != port[0].request->ctx);
+       port[0].count = 0;
+       port[1].count = 0;
+}
+
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
        struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
@@ -1702,10 +1657,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        }
        intel_lr_context_unpin(dev_priv->kernel_context, engine);
 
-       engine->idle_lite_restore_wa = 0;
-       engine->disable_lite_restore_wa = false;
-       engine->ctx_desc_template = 0;
-
        lrc_destroy_wa_ctx_obj(engine);
        engine->i915 = NULL;
 }
@@ -1723,6 +1674,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overriden by each engine. */
        engine->init_hw = gen8_init_common_ring;
+       engine->reset_hw = reset_common_ring;
        engine->emit_flush = gen8_emit_flush;
        engine->emit_request = gen8_emit_request;
        engine->submit_request = execlists_submit_request;
@@ -1896,24 +1848,24 @@ make_rpcs(struct drm_i915_private *dev_priv)
         * must make an explicit request through RPCS for full
         * enablement.
        */
-       if (INTEL_INFO(dev_priv)->has_slice_pg) {
+       if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
                rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev_priv)->slice_total <<
+               rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
                        GEN8_RPCS_S_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev_priv)->has_subslice_pg) {
+       if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
                rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
+               rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
                        GEN8_RPCS_SS_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev_priv)->has_eu_pg) {
-               rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+       if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+               rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
                        GEN8_RPCS_EU_MIN_SHIFT;
-               rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+               rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
                        GEN8_RPCS_EU_MAX_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
@@ -2175,9 +2127,9 @@ error_deref_obj:
        return ret;
 }
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-                           struct i915_gem_context *ctx)
+void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 {
+       struct i915_gem_context *ctx = dev_priv->kernel_context;
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv) {
index a52cf57..4fed816 100644 (file)
@@ -87,8 +87,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 
 struct drm_i915_private;
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-                           struct i915_gem_context *ctx);
+void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine);
 
@@ -97,6 +96,4 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
                                    int enable_execlists);
 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
 
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
-
 #endif /* _INTEL_LRC_H_ */
index 52d6ed6..e1d47d5 100644 (file)
@@ -230,20 +230,21 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
        I915_WRITE(PP_DIVISOR(0), val);
 }
 
-static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *pipe_config,
+                                 struct drm_connector_state *conn_state)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        int pipe = crtc->pipe;
        u32 temp;
 
-       if (HAS_PCH_SPLIT(dev)) {
+       if (HAS_PCH_SPLIT(dev_priv)) {
                assert_fdi_rx_pll_disabled(dev_priv, pipe);
                assert_shared_dpll_disabled(dev_priv,
-                                           crtc->config->shared_dpll);
+                                           pipe_config->shared_dpll);
        } else {
                assert_pll_disabled(dev_priv, pipe);
        }
@@ -253,7 +254,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
        temp = lvds_encoder->init_lvds_val;
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
-       if (HAS_PCH_CPT(dev)) {
+       if (HAS_PCH_CPT(dev_priv)) {
                temp &= ~PORT_TRANS_SEL_MASK;
                temp |= PORT_TRANS_SEL_CPT(pipe);
        } else {
@@ -266,7 +267,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 
        /* set the corresponding LVDS_BORDER bit */
        temp &= ~LVDS_BORDER_ENABLE;
-       temp |= crtc->config->gmch_pfit.lvds_border_bits;
+       temp |= pipe_config->gmch_pfit.lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
@@ -289,7 +290,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
        if (IS_GEN4(dev_priv)) {
                /* Bspec wording suggests that LVDS port dithering only exists
                 * for 18bpp panels. */
-               if (crtc->config->dither && crtc->config->pipe_bpp == 18)
+               if (pipe_config->dither && pipe_config->pipe_bpp == 18)
                        temp |= LVDS_ENABLE_DITHER;
                else
                        temp &= ~LVDS_ENABLE_DITHER;
@@ -306,7 +307,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 /**
  * Sets the power state for the panel.
  */
-static void intel_enable_lvds(struct intel_encoder *encoder)
+static void intel_enable_lvds(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -324,11 +327,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
        intel_panel_enable_backlight(intel_connector);
 }
 
-static void intel_disable_lvds(struct intel_encoder *encoder)
+static void intel_disable_lvds(struct intel_encoder *encoder,
+                              struct intel_crtc_state *old_crtc_state,
+                              struct drm_connector_state *old_conn_state)
 {
-       struct drm_device *dev = encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
        I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
        if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
@@ -338,7 +342,10 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
        POSTING_READ(lvds_encoder->reg);
 }
 
-static void gmch_disable_lvds(struct intel_encoder *encoder)
+static void gmch_disable_lvds(struct intel_encoder *encoder,
+                             struct intel_crtc_state *old_crtc_state,
+                             struct drm_connector_state *old_conn_state)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_connector *intel_connector =
@@ -346,10 +353,12 @@ static void gmch_disable_lvds(struct intel_encoder *encoder)
 
        intel_panel_disable_backlight(intel_connector);
 
-       intel_disable_lvds(encoder);
+       intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_lvds(struct intel_encoder *encoder)
+static void pch_disable_lvds(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_connector *intel_connector =
@@ -358,9 +367,11 @@ static void pch_disable_lvds(struct intel_encoder *encoder)
        intel_panel_disable_backlight(intel_connector);
 }
 
-static void pch_post_disable_lvds(struct intel_encoder *encoder)
+static void pch_post_disable_lvds(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
-       intel_disable_lvds(encoder);
+       intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
 static enum drm_mode_status
@@ -382,7 +393,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
-                                     struct intel_crtc_state *pipe_config)
+                                     struct intel_crtc_state *pipe_config,
+                                     struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = intel_encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder =
index adca262..7acbbbf 100644 (file)
@@ -1047,6 +1047,23 @@ err_out:
        return err;
 }
 
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+       DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+       {
+               .callback = intel_use_opregion_panel_type_callback,
+               .ident = "Conrac GmbH IX45GM2",
+               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+                           DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+               },
+       },
+       { }
+};
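
Extending this quirk list is one more entry ahead of the { } terminator; a hypothetical second machine (vendor and product strings invented for illustration) would read:

    {
            .callback = intel_use_opregion_panel_type_callback,
            .ident = "Example Vendor Example Box",   /* hypothetical */
            .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Box"),
            },
    },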
+
 int
 intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
@@ -1072,6 +1089,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
                return -ENODEV;
        }
 
+       /*
+        * So far we know that some machines must use it, others must not use it.
+        * There doesn't seem to be any way to determine which way to go, except
+        * via a quirk list :(
+        */
+       if (!dmi_check_system(intel_use_opregion_panel_type)) {
+               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+               return -ENODEV;
+       }
+
        /*
         * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
         * low vswing for eDP, whereas the VBT panel type (2) gives us normal
index 96c65d7..c10e9b0 100644 (file)
@@ -1430,10 +1430,11 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = lpt_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
 
        return 0;
 }
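
The same clamp-on-readout pattern repeats in every backlight setup hook in this hunk; in isolation it is nothing more than the following (a sketch of what the kernel's clamp() macro does here):

    /* keep a raw hardware PWM readout inside the VBT-derived range */
    static unsigned int clamp_level(unsigned int raw,
                                    unsigned int min, unsigned int max)
    {
            if (raw < min)
                    return min;
            if (raw > max)
                    return max;
            return raw;
    }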
@@ -1459,11 +1460,13 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = pch_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
        cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
        panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
-               (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+               (pch_ctl1 & BLM_PCH_PWM_ENABLE);
 
        return 0;
 }
@@ -1498,9 +1501,11 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = i9xx_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = panel->backlight.level != 0;
+       panel->backlight.enabled = val != 0;
 
        return 0;
 }
@@ -1530,10 +1535,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = i9xx_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
        return 0;
 }
@@ -1562,10 +1568,11 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = _vlv_get_backlight(dev_priv, pipe);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
        return 0;
 }
@@ -1607,10 +1614,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
                return -ENODEV;
 
        val = bxt_get_backlight(connector);
-       panel->backlight.level = intel_panel_compute_brightness(connector, val);
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
 
-       panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
-               panel->backlight.level != 0;
+       panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
 
        return 0;
 }
index 8a6751e..2df06b7 100644 (file)
@@ -2853,13 +2853,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
        return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-/*
- * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
- * different active planes.
- */
-
-#define SKL_DDB_SIZE           896     /* in blocks */
-#define BXT_DDB_SIZE           512
+#define SKL_SAGV_BLOCK_TIME    30 /* µs */
 
 /*
  * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
@@ -2883,6 +2877,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
        }
 }
 
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ *  - <= 1 pipe enabled
+ *  - All planes can enable watermarks for latencies >= SAGV engine block time
+ *  - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+           dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+               return 0;
+
+       DRM_DEBUG_KMS("Enabling the SAGV\n");
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                                     GEN9_SAGV_ENABLE);
+
+       /* We don't need to wait for the SAGV when enabling */
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       /*
+        * Some skl systems, pre-release machines in particular,
+        * don't actually have an SAGV.
+        */
+       if (ret == -ENXIO) {
+               DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+               dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+               return 0;
+       } else if (ret < 0) {
+               DRM_ERROR("Failed to enable the SAGV\n");
+               return ret;
+       }
+
+       dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+       return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+       int ret;
+       uint32_t temp = GEN9_SAGV_DISABLE;
+
+       ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                                    &temp);
+       if (ret)
+               return ret;
+       else
+               return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+       int ret, result;
+
+       if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+           dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+               return 0;
+
+       DRM_DEBUG_KMS("Disabling the SAGV\n");
+       mutex_lock(&dev_priv->rps.hw_lock);
+
+       /* bspec says to keep retrying for at least 1 ms */
+       ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+
+       if (ret == -ETIMEDOUT) {
+               DRM_ERROR("Request to disable SAGV timed out\n");
+               return -ETIMEDOUT;
+       }
+
+       /*
+        * Some skl systems, pre-release machines in particular,
+        * don't actually have an SAGV.
+        */
+       if (result == -ENXIO) {
+               DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+               dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+               return 0;
+       } else if (result < 0) {
+               DRM_ERROR("Failed to disable the SAGV\n");
+               return result;
+       }
+
+       dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+       return 0;
+}
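
wait_for() above is the driver's poll-until-true-or-timeout helper; a user-space sketch of the same pattern (names are illustrative, and unlike the kernel macro this returns 0 on timeout rather than -ETIMEDOUT):

    #include <time.h>

    static long elapsed_ms(const struct timespec *a, const struct timespec *b)
    {
            return (b->tv_sec - a->tv_sec) * 1000L +
                   (b->tv_nsec - a->tv_nsec) / 1000000L;
    }

    /* Re-evaluate cond() until it returns non-zero or timeout_ms passes. */
    static int poll_until(int (*cond)(void *), void *arg, long timeout_ms)
    {
            struct timespec start, now;
            int r;

            clock_gettime(CLOCK_MONOTONIC, &start);
            do {
                    r = cond(arg);
                    if (r)
                            return r;   /* condition met (or hard error) */
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while (elapsed_ms(&start, &now) < timeout_ms);
            return 0;                   /* timed out */
    }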
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_crtc *crtc;
+       enum pipe pipe;
+       int level, plane;
+
+       /*
+        * SKL workaround: bspec recommends we disable the SAGV when we have
+        * more than one pipe enabled
+        *
+        * If there are no active CRTCs, no additional checks need be performed
+        */
+       if (hweight32(intel_state->active_crtcs) == 0)
+               return true;
+       else if (hweight32(intel_state->active_crtcs) > 1)
+               return false;
+
+       /* Since we're now guaranteed to only have one active CRTC... */
+       pipe = ffs(intel_state->active_crtcs) - 1;
+       crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+       if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+               return false;
+
+       for_each_plane(dev_priv, pipe, plane) {
+               /* Skip this plane if it's not enabled */
+               if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+                       continue;
+
+               /* Find the highest enabled wm level for this plane */
+               for (level = ilk_wm_max_level(dev);
+                    intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+                    { }
+
+               /*
+                * If any of the planes on this pipe don't enable wm levels
+                * that incur memory latencies higher than 30µs we can't enable
+                * the SAGV
+                */
+               if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+                       return false;
+       }
+
+       return true;
+}
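
The single-pipe test above is bitmask arithmetic: hweight32() counts set bits, and when exactly one bit is set, ffs() - 1 recovers its index. Stand-alone, with GCC builtins in place of the kernel helpers:

    #include <strings.h>    /* ffs() */

    /* -1: no active pipe (SAGV is safe), -2: several pipes (must disable),
     * otherwise the index of the single active pipe. */
    static int single_active_pipe(unsigned int active_crtcs)
    {
            int n = __builtin_popcount(active_crtcs);  /* ~hweight32() */

            if (n == 0)
                    return -1;
            if (n > 1)
                    return -2;
            return ffs(active_crtcs) - 1;   /* bit index == pipe number */
    }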
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
                                   const struct intel_crtc_state *cstate,
@@ -2909,10 +3050,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
        else
                *num_active = hweight32(dev_priv->active_crtcs);
 
-       if (IS_BROXTON(dev))
-               ddb_size = BXT_DDB_SIZE;
-       else
-               ddb_size = SKL_DDB_SIZE;
+       ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+       WARN_ON(ddb_size == 0);
 
        ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
@@ -3688,183 +3827,82 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
                I915_WRITE(reg, 0);
 }
 
-static void skl_write_wm_values(struct drm_i915_private *dev_priv,
-                               const struct skl_wm_values *new)
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+                       const struct skl_wm_values *wm,
+                       int plane)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_crtc *crtc;
-
-       for_each_intel_crtc(dev, crtc) {
-               int i, level, max_level = ilk_wm_max_level(dev);
-               enum pipe pipe = crtc->pipe;
-
-               if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
-                       continue;
-               if (!crtc->active)
-                       continue;
-
-               I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
-
-               for (level = 0; level <= max_level; level++) {
-                       for (i = 0; i < intel_num_planes(crtc); i++)
-                               I915_WRITE(PLANE_WM(pipe, i, level),
-                                          new->plane[pipe][i][level]);
-                       I915_WRITE(CUR_WM(pipe, level),
-                                  new->plane[pipe][PLANE_CURSOR][level]);
-               }
-               for (i = 0; i < intel_num_planes(crtc); i++)
-                       I915_WRITE(PLANE_WM_TRANS(pipe, i),
-                                  new->plane_trans[pipe][i]);
-               I915_WRITE(CUR_WM_TRANS(pipe),
-                          new->plane_trans[pipe][PLANE_CURSOR]);
-
-               for (i = 0; i < intel_num_planes(crtc); i++) {
-                       skl_ddb_entry_write(dev_priv,
-                                           PLANE_BUF_CFG(pipe, i),
-                                           &new->ddb.plane[pipe][i]);
-                       skl_ddb_entry_write(dev_priv,
-                                           PLANE_NV12_BUF_CFG(pipe, i),
-                                           &new->ddb.y_plane[pipe][i]);
-               }
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int level, max_level = ilk_wm_max_level(dev);
+       enum pipe pipe = intel_crtc->pipe;
 
-               skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-                                   &new->ddb.plane[pipe][PLANE_CURSOR]);
+       for (level = 0; level <= max_level; level++) {
+               I915_WRITE(PLANE_WM(pipe, plane, level),
+                          wm->plane[pipe][plane][level]);
        }
-}
+       I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
 
-/*
- * When setting up a new DDB allocation arrangement, we need to correctly
- * sequence the times at which the new allocations for the pipes are taken into
- * account or we'll have pipes fetching from space previously allocated to
- * another pipe.
- *
- * Roughly the sequence looks like:
- *  1. re-allocate the pipe(s) with the allocation being reduced and not
- *     overlapping with a previous light-up pipe (another way to put it is:
- *     pipes with their new allocation strickly included into their old ones).
- *  2. re-allocate the other pipes that get their allocation reduced
- *  3. allocate the pipes having their allocation increased
- *
- * Steps 1. and 2. are here to take care of the following case:
- * - Initially DDB looks like this:
- *     |   B    |   C    |
- * - enable pipe A.
- * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
- *   allocation
- *     |  A  |  B  |  C  |
- *
- * We need to sequence the re-allocation: C, B, A (and not B, C, A).
- */
+       skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
+                           &wm->ddb.plane[pipe][plane]);
+       skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
+                           &wm->ddb.y_plane[pipe][plane]);
+}
 
-static void
-skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+                        const struct skl_wm_values *wm)
 {
-       int plane;
-
-       DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int level, max_level = ilk_wm_max_level(dev);
+       enum pipe pipe = intel_crtc->pipe;
 
-       for_each_plane(dev_priv, pipe, plane) {
-               I915_WRITE(PLANE_SURF(pipe, plane),
-                          I915_READ(PLANE_SURF(pipe, plane)));
+       for (level = 0; level <= max_level; level++) {
+               I915_WRITE(CUR_WM(pipe, level),
+                          wm->plane[pipe][PLANE_CURSOR][level]);
        }
-       I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+       I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+
+       skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+                           &wm->ddb.plane[pipe][PLANE_CURSOR]);
 }
 
-static bool
-skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
-                           const struct skl_ddb_allocation *new,
-                           enum pipe pipe)
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+                              const struct skl_ddb_allocation *new,
+                              enum pipe pipe)
 {
-       uint16_t old_size, new_size;
-
-       old_size = skl_ddb_entry_size(&old->pipe[pipe]);
-       new_size = skl_ddb_entry_size(&new->pipe[pipe]);
-
-       return old_size != new_size &&
-              new->pipe[pipe].start >= old->pipe[pipe].start &&
-              new->pipe[pipe].end <= old->pipe[pipe].end;
+       return new->pipe[pipe].start == old->pipe[pipe].start &&
+              new->pipe[pipe].end == old->pipe[pipe].end;
 }
 
-static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
-                               struct skl_wm_values *new_values)
+static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+                                          const struct skl_ddb_entry *b)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct skl_ddb_allocation *cur_ddb, *new_ddb;
-       bool reallocated[I915_MAX_PIPES] = {};
-       struct intel_crtc *crtc;
-       enum pipe pipe;
-
-       new_ddb = &new_values->ddb;
-       cur_ddb = &dev_priv->wm.skl_hw.ddb;
-
-       /*
-        * First pass: flush the pipes with the new allocation contained into
-        * the old space.
-        *
-        * We'll wait for the vblank on those pipes to ensure we can safely
-        * re-allocate the freed space without this pipe fetching from it.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               if (!crtc->active)
-                       continue;
-
-               pipe = crtc->pipe;
-
-               if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
-                       continue;
-
-               skl_wm_flush_pipe(dev_priv, pipe, 1);
-               intel_wait_for_vblank(dev, pipe);
-
-               reallocated[pipe] = true;
-       }
-
+       return a->start < b->end && b->start < a->end;
+}
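
skl_ddb_entries_overlap() is the standard half-open interval test: two [start, end) ranges intersect iff each starts before the other ends. A small self-check (values chosen for illustration):

    struct entry { unsigned short start, end; };    /* [start, end) */

    int main(void)
    {
            struct entry a = { 0, 200 }, b = { 150, 400 }, c = { 200, 400 };

            /* a/b overlap (150 < 200); a/c merely touch, so they don't */
            return (a.start < b.end && b.start < a.end) &&
                  !(a.start < c.end && c.start < a.end) ? 0 : 1;
    }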
 
-       /*
-        * Second pass: flush the pipes that are having their allocation
-        * reduced, but overlapping with a previous allocation.
-        *
-        * Here as well we need to wait for the vblank to make sure the freed
-        * space is not used anymore.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               if (!crtc->active)
-                       continue;
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+                                const struct skl_ddb_allocation *old,
+                                const struct skl_ddb_allocation *new,
+                                enum pipe pipe)
+{
+       struct drm_device *dev = state->dev;
+       struct intel_crtc *intel_crtc;
+       enum pipe otherp;
 
-               pipe = crtc->pipe;
+       for_each_intel_crtc(dev, intel_crtc) {
+               otherp = intel_crtc->pipe;
 
-               if (reallocated[pipe])
+               if (otherp == pipe)
                        continue;
 
-               if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
-                   skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
-                       skl_wm_flush_pipe(dev_priv, pipe, 2);
-                       intel_wait_for_vblank(dev, pipe);
-                       reallocated[pipe] = true;
-               }
+               if (skl_ddb_entries_overlap(&new->pipe[pipe],
+                                           &old->pipe[otherp]))
+                       return true;
        }
 
-       /*
-        * Third pass: flush the pipes that got more space allocated.
-        *
-        * We don't need to actively wait for the update here, next vblank
-        * will just get more DDB space with the correct WM values.
-        */
-       for_each_intel_crtc(dev, crtc) {
-               if (!crtc->active)
-                       continue;
-
-               pipe = crtc->pipe;
-
-               /*
-                * At this point, only the pipes more space than before are
-                * left to re-allocate.
-                */
-               if (reallocated[pipe])
-                       continue;
-
-               skl_wm_flush_pipe(dev_priv, pipe, 3);
-       }
+       return false;
 }
 
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
@@ -3964,11 +4002,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
                ret = skl_allocate_pipe_ddb(cstate, ddb);
                if (ret)
                        return ret;
+
+               ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+               if (ret)
+                       return ret;
        }
 
        return 0;
 }
 
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+                    struct skl_wm_values *src,
+                    enum pipe pipe)
+{
+       dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+       memcpy(dst->plane[pipe], src->plane[pipe],
+              sizeof(dst->plane[pipe]));
+       memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+              sizeof(dst->plane_trans[pipe]));
+
+       dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+       memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+              sizeof(dst->ddb.y_plane[pipe]));
+       memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+              sizeof(dst->ddb.plane[pipe]));
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -4041,8 +4101,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_wm_values *results = &dev_priv->wm.skl_results;
+       struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
        struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+       enum pipe pipe = intel_crtc->pipe;
 
        if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
                return;
@@ -4051,11 +4113,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
 
        mutex_lock(&dev_priv->wm.wm_mutex);
 
-       skl_write_wm_values(dev_priv, results);
-       skl_flush_wm_values(dev_priv, results);
+       /*
+        * If this pipe isn't active already, we're going to be enabling it
+        * very soon. Since it's safe to update a pipe's ddb allocation while
+        * the pipe's shut off, just do so here. Already active pipes will have
+        * their watermarks updated once we update their planes.
+        */
+       if (crtc->state->active_changed) {
+               int plane;
+
+               for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
+                       skl_write_plane_wm(intel_crtc, results, plane);
 
-       /* store the new configuration */
-       dev_priv->wm.skl_hw = *results;
+               skl_write_cursor_wm(intel_crtc, results);
+       }
+
+       skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -5550,7 +5623,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 
        val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-       switch (INTEL_INFO(dev_priv)->eu_total) {
+       switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
        case 8:
                /* (2 * 4) config */
                rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -6691,9 +6764,7 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 
        if (IS_IRONLAKE_M(dev_priv)) {
                ironlake_enable_drps(dev_priv);
-               mutex_lock(&dev_priv->drm.struct_mutex);
                intel_init_emon(dev_priv);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
        } else if (INTEL_INFO(dev_priv)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
@@ -7659,8 +7730,54 @@ void intel_init_pm(struct drm_device *dev)
        }
 }
 
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+       uint32_t flags =
+               I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+       switch (flags) {
+       case GEN6_PCODE_SUCCESS:
+               return 0;
+       case GEN6_PCODE_UNIMPLEMENTED_CMD:
+       case GEN6_PCODE_ILLEGAL_CMD:
+               return -ENXIO;
+       case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+               return -EOVERFLOW;
+       case GEN6_PCODE_TIMEOUT:
+               return -ETIMEDOUT;
+       default:
+               MISSING_CASE(flags);
+               return 0;
+       }
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+       uint32_t flags =
+               I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+       switch (flags) {
+       case GEN6_PCODE_SUCCESS:
+               return 0;
+       case GEN6_PCODE_ILLEGAL_CMD:
+               return -ENXIO;
+       case GEN7_PCODE_TIMEOUT:
+               return -ETIMEDOUT;
+       case GEN7_PCODE_ILLEGAL_DATA:
+               return -EINVAL;
+       case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+               return -EOVERFLOW;
+       default:
+               MISSING_CASE(flags);
+               return 0;
+       }
+}
+
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
 {
+       int status;
+
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7687,12 +7804,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
        *val = I915_READ_FW(GEN6_PCODE_DATA);
        I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+       if (INTEL_GEN(dev_priv) > 6)
+               status = gen7_check_mailbox_status(dev_priv);
+       else
+               status = gen6_check_mailbox_status(dev_priv);
+
+       if (status) {
+               DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+                                status);
+               return status;
+       }
+
        return 0;
 }
 
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-                              u32 mbox, u32 val)
+                           u32 mbox, u32 val)
 {
+       int status;
+
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7717,6 +7847,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
        I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+       if (INTEL_GEN(dev_priv) > 6)
+               status = gen7_check_mailbox_status(dev_priv);
+       else
+               status = gen6_check_mailbox_status(dev_priv);
+
+       if (status) {
+               DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+                                status);
+               return status;
+       }
+
        return 0;
 }
 
index 59a21c9..108ba1e 100644 (file)
@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        uint32_t max_sleep_time = 0x1f;
-       /* Lately it was identified that depending on panel idle frame count
-        * calculated at HW can be off by 1. So let's use what came
-        * from VBT + 1.
-        * There are also other cases where panel demands at least 4
-        * but VBT is not being set. To cover these 2 cases lets use
-        * at least 5 when VBT isn't set to be on the safest side.
+       /*
+        * Let's respect VBT in case VBT asks for a higher idle_frame value.
+        * Let's use 6 as the minimum to cover all known cases including
+        * the off-by-one issue that HW has in some cases. Also there are
+        * cases where the sink should be able to train with 5 or 6 idle
+        * patterns.
         */
-       uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
+       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
        uint32_t val = EDP_PSR_ENABLE;
 
        val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
index cc5bcd1..7a74750 100644 (file)
@@ -559,10 +559,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
                }
        }
 
-       if (I915_NEED_GFX_HWS(dev_priv))
-               intel_ring_setup_status_page(engine);
-       else
+       if (HWS_NEEDS_PHYSICAL(dev_priv))
                ring_setup_phys_status_page(engine);
+       else
+               intel_ring_setup_status_page(engine);
+
+       intel_engine_reset_irq(engine);
 
        /* Enforce ordering by reading HEAD register back */
        I915_READ_HEAD(engine);
@@ -577,34 +579,33 @@ static int init_ring_common(struct intel_engine_cs *engine)
        if (I915_READ_HEAD(engine))
                DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
                          engine->name, I915_READ_HEAD(engine));
-       I915_WRITE_HEAD(engine, 0);
-       (void)I915_READ_HEAD(engine);
+
+       intel_ring_update_space(ring);
+       I915_WRITE_HEAD(engine, ring->head);
+       I915_WRITE_TAIL(engine, ring->tail);
+       (void)I915_READ_TAIL(engine);
 
        I915_WRITE_CTL(engine,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
-       if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
-                    I915_READ_START(engine) == i915_ggtt_offset(ring->vma) &&
-                    (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
+       if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
+                                      RING_VALID, RING_VALID,
+                                      50)) {
                DRM_ERROR("%s initialization failed "
-                         "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08x]\n",
+                         "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
                          engine->name,
                          I915_READ_CTL(engine),
                          I915_READ_CTL(engine) & RING_VALID,
-                         I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+                         I915_READ_HEAD(engine), ring->head,
+                         I915_READ_TAIL(engine), ring->tail,
                          I915_READ_START(engine),
                          i915_ggtt_offset(ring->vma));
                ret = -EIO;
                goto out;
        }
 
-       ring->last_retired_head = -1;
-       ring->head = I915_READ_HEAD(engine);
-       ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-       intel_ring_update_space(ring);
-
        intel_engine_init_hangcheck(engine);
 
 out:
@@ -613,6 +614,15 @@ out:
        return ret;
 }
 
+static void reset_ring_common(struct intel_engine_cs *engine,
+                             struct drm_i915_gem_request *request)
+{
+       struct intel_ring *ring = request->ring;
+
+       ring->head = request->postfix;
+       ring->last_retired_head = -1;
+}
+
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        struct intel_ring *ring = req->ring;
@@ -951,7 +961,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
-               if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
+               if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
                        continue;
 
                /*
@@ -960,7 +970,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
                 *
                 * ->    0 <= ss <= 3;
                 */
-               ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+               ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }
 
@@ -2007,7 +2017,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
        }
        ring->vma = vma;
 
-       list_add(&ring->link, &engine->buffers);
        return ring;
 }
 
@@ -2015,7 +2024,6 @@ void
 intel_ring_free(struct intel_ring *ring)
 {
        i915_vma_put(ring->vma);
-       list_del(&ring->link);
        kfree(ring);
 }
 
@@ -2109,13 +2117,13 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
                goto error;
        }
 
-       if (I915_NEED_GFX_HWS(dev_priv)) {
-               ret = init_status_page(engine);
+       if (HWS_NEEDS_PHYSICAL(dev_priv)) {
+               WARN_ON(engine->id != RCS);
+               ret = init_phys_status_page(engine);
                if (ret)
                        goto error;
        } else {
-               WARN_ON(engine->id != RCS);
-               ret = init_phys_status_page(engine);
+               ret = init_status_page(engine);
                if (ret)
                        goto error;
        }
@@ -2155,11 +2163,11 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
        if (engine->cleanup)
                engine->cleanup(engine);
 
-       if (I915_NEED_GFX_HWS(dev_priv)) {
-               cleanup_status_page(engine);
-       } else {
+       if (HWS_NEEDS_PHYSICAL(dev_priv)) {
                WARN_ON(engine->id != RCS);
                cleanup_phys_status_page(engine);
+       } else {
+               cleanup_status_page(engine);
        }
 
        intel_engine_cleanup_common(engine);
@@ -2169,6 +2177,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
        engine->i915 = NULL;
 }
 
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+
+       for_each_engine(engine, dev_priv) {
+               engine->buffer->head = engine->buffer->tail;
+               engine->buffer->last_retired_head = -1;
+       }
+}
+
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
        int ret;
@@ -2223,13 +2241,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
        if (WARN_ON(&target->ring_link == &ring->request_list))
                return -ENOSPC;
 
-       ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
+       ret = i915_wait_request(target,
+                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+                               NULL, NO_WAITBOOST);
        if (ret)
                return ret;
 
-       if (i915_reset_in_progress(&target->i915->gpu_error))
-               return -EAGAIN;
-
        i915_gem_request_retire_upto(target);
 
        intel_ring_update_space(ring);
@@ -2655,6 +2672,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        intel_ring_init_semaphores(dev_priv, engine);
 
        engine->init_hw = init_ring_common;
+       engine->reset_hw = reset_ring_common;
 
        engine->emit_request = i9xx_emit_request;
        if (i915.semaphores)
index 84aea54..7f64d61 100644 (file)
@@ -87,7 +87,6 @@ struct intel_ring {
        void *vaddr;
 
        struct intel_engine_cs *engine;
-       struct list_head link;
 
        struct list_head request_list;
 
@@ -157,7 +156,6 @@ struct intel_engine_cs {
        u32             mmio_base;
        unsigned int irq_shift;
        struct intel_ring *buffer;
-       struct list_head buffers;
 
        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
@@ -211,6 +209,8 @@ struct intel_engine_cs {
        void            (*irq_disable)(struct intel_engine_cs *engine);
 
        int             (*init_hw)(struct intel_engine_cs *engine);
+       void            (*reset_hw)(struct intel_engine_cs *engine,
+                                   struct drm_i915_gem_request *req);
 
        int             (*init_context)(struct drm_i915_gem_request *req);
 
@@ -226,7 +226,15 @@ struct intel_engine_cs {
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
        int             (*emit_request)(struct drm_i915_gem_request *req);
+
+       /* Pass the request to the hardware queue (e.g. directly into
+        * the legacy ringbuffer or to the end of an execlist).
+        *
+        * This is called from an atomic context with irqs disabled; must
+        * be irq safe.
+        */
        void            (*submit_request)(struct drm_i915_gem_request *req);
+
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
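The comment above fixes the locking contract for submit_request. A hedged sketch of an implementation honouring it; engine->submit_lock and example_hw_write_tail are hypothetical names, not part of this patch:

/* Hypothetical sketch only: must be callable with irqs already disabled. */
static void example_submit_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	unsigned long flags;

	spin_lock_irqsave(&engine->submit_lock, flags);	/* assumed lock */
	example_hw_write_tail(engine, req->tail);	/* hypothetical helper */
	spin_unlock_irqrestore(&engine->submit_lock, flags);
}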
@@ -298,11 +306,14 @@ struct intel_engine_cs {
        /* Execlists */
        struct tasklet_struct irq_tasklet;
        spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
+       struct execlist_port {
+               struct drm_i915_gem_request *request;
+               unsigned int count;
+       } execlist_port[2];
        struct list_head execlist_queue;
        unsigned int fw_domains;
-       unsigned int next_context_status_buffer;
-       unsigned int idle_lite_restore_wa;
        bool disable_lite_restore_wa;
+       bool preempt_wa;
        u32 ctx_desc_template;
 
        /**
@@ -441,6 +452,8 @@ void intel_ring_free(struct intel_ring *ring);
 void intel_engine_stop(struct intel_engine_cs *engine);
 void intel_engine_cleanup(struct intel_engine_cs *engine);
 
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
+
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
@@ -479,6 +492,7 @@ int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ring *ring);
 
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_reset_irq(struct intel_engine_cs *engine);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
@@ -486,11 +500,11 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
 static inline int intel_engine_idle(struct intel_engine_cs *engine,
-                                   bool interruptible)
+                                   unsigned int flags)
 {
        /* Wait upon the last request to be completed */
        return i915_gem_active_wait_unlocked(&engine->last_request,
-                                            interruptible, NULL, NULL);
+                                            flags, NULL, NULL);
 }
 
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
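Since intel_engine_idle() now takes a flags word instead of a bool, callers select behaviour with I915_WAIT_* bits. An illustrative caller, not taken from this patch:

/* Interruptible wait for the engine's last request to retire. */
static int example_quiesce_engine(struct intel_engine_cs *engine)
{
	return intel_engine_idle(engine, I915_WAIT_INTERRUPTIBLE);
}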
index a1d73c2..6c11168 100644 (file)
@@ -287,6 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_device *dev = &dev_priv->drm;
 
        /*
@@ -299,9 +300,9 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
         * sure vgacon can keep working normally without triggering interrupts
         * and error messages.
         */
-       vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
        outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-       vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+       vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
        if (IS_BROADWELL(dev))
                gen8_irq_power_well_post_enable(dev_priv,
@@ -318,7 +319,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
 {
-       struct drm_device *dev = &dev_priv->drm;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
 
        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -331,9 +332,9 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
         * and error messages.
         */
        if (power_well->data == SKL_DISP_PW_2) {
-               vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+               vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
                outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-               vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+               vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
                gen8_irq_power_well_post_enable(dev_priv,
                                                1 << PIPE_C | 1 << PIPE_B);
@@ -2288,7 +2289,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-       struct device *device = &dev_priv->drm.pdev->dev;
+       struct device *kdev = &dev_priv->drm.pdev->dev;
 
        /*
         * The i915.ko module is still not prepared to be loaded when
@@ -2310,7 +2311,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
         * the platform doesn't support runtime PM.
         */
        if (!HAS_RUNTIME_PM(dev_priv))
-               pm_runtime_put(device);
+               pm_runtime_put(kdev);
 }
 
 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2651,10 +2652,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
-       pm_runtime_get_sync(device);
+       pm_runtime_get_sync(kdev);
 
        atomic_inc(&dev_priv->pm.wakeref_count);
        assert_rpm_wakelock_held(dev_priv);
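Each intel_runtime_pm_get() takes a wakeref that must be balanced by intel_runtime_pm_put(). An illustrative pairing, assuming nothing beyond the functions shown in this file:

static void example_touch_hardware(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);		/* device guaranteed awake */
	/* ... register access is safe in this window ... */
	intel_runtime_pm_put(dev_priv);		/* rearms the autosuspend timer */
}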
@@ -2672,11 +2673,11 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
        if (IS_ENABLED(CONFIG_PM)) {
-               int ret = pm_runtime_get_if_in_use(device);
+               int ret = pm_runtime_get_if_in_use(kdev);
 
                /*
                 * In cases runtime PM is disabled by the RPM core and we get
@@ -2714,11 +2715,11 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
-       pm_runtime_get_noresume(device);
+       pm_runtime_get_noresume(kdev);
 
        atomic_inc(&dev_priv->pm.wakeref_count);
 }
@@ -2733,15 +2734,15 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct device *kdev = &pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
        if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
                atomic_inc(&dev_priv->pm.atomic_seq);
 
-       pm_runtime_mark_last_busy(device);
-       pm_runtime_put_autosuspend(device);
+       pm_runtime_mark_last_busy(kdev);
+       pm_runtime_put_autosuspend(kdev);
 }
 
 /**
@@ -2756,11 +2757,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_device *dev = &dev_priv->drm;
-       struct device *device = &dev->pdev->dev;
+       struct device *kdev = &pdev->dev;
 
-       pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
-       pm_runtime_mark_last_busy(device);
+       pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
+       pm_runtime_mark_last_busy(kdev);
 
        /*
         * Take a permanent reference to disable the RPM functionality and drop
@@ -2769,10 +2771,10 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
         * platforms without RPM support.
         */
        if (!HAS_RUNTIME_PM(dev)) {
-               pm_runtime_dont_use_autosuspend(device);
-               pm_runtime_get_sync(device);
+               pm_runtime_dont_use_autosuspend(kdev);
+               pm_runtime_get_sync(kdev);
        } else {
-               pm_runtime_use_autosuspend(device);
+               pm_runtime_use_autosuspend(kdev);
        }
 
        /*
@@ -2780,6 +2782,5 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
         * We drop that here and will reacquire it during unloading in
         * intel_power_domains_fini().
         */
-       pm_runtime_put_autosuspend(device);
+       pm_runtime_put_autosuspend(kdev);
 }
-
index e378f35..c551024 100644 (file)
@@ -1003,24 +1003,22 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 }
 
 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-                                        const struct drm_display_mode *adjusted_mode)
+                                        struct intel_crtc_state *pipe_config)
 {
        uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
-       struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        union hdmi_infoframe frame;
        int ret;
        ssize_t len;
 
        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-                                                      adjusted_mode);
+                                                      &pipe_config->base.adjusted_mode);
        if (ret < 0) {
                DRM_ERROR("couldn't fill AVI infoframe\n");
                return false;
        }
 
        if (intel_sdvo->rgb_quant_range_selectable) {
-               if (intel_crtc->config->limited_color_range)
+               if (pipe_config->limited_color_range)
                        frame.avi.quantization_range =
                                HDMI_QUANTIZATION_RANGE_LIMITED;
                else
@@ -1125,7 +1123,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
 }
 
 static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
-                                     struct intel_crtc_state *pipe_config)
+                                     struct intel_crtc_state *pipe_config,
+                                     struct drm_connector_state *conn_state)
 {
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -1192,22 +1191,21 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
-       const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
-       struct drm_display_mode *mode = &crtc->config->base.mode;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+       struct drm_display_mode *mode = &crtc_state->base.mode;
        struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
        u32 sdvox;
        struct intel_sdvo_in_out_map in_out;
        struct intel_sdvo_dtd input_dtd, output_dtd;
        int rate;
 
-       if (!mode)
-               return;
-
        /* First, set the input mapping for the first input to our controlled
         * output. This is only correct if we're a single-input device, in
         * which case the first input is the output from the appropriate SDVO
@@ -1240,11 +1238,11 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
        if (!intel_sdvo_set_target_input(intel_sdvo))
                return;
 
-       if (crtc->config->has_hdmi_sink) {
+       if (crtc_state->has_hdmi_sink) {
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
                intel_sdvo_set_colorimetry(intel_sdvo,
                                           SDVO_COLORIMETRY_RGB256);
-               intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+               intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
        } else
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1260,7 +1258,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
                DRM_INFO("Setting input timings on %s failed\n",
                         SDVO_NAME(intel_sdvo));
 
-       switch (crtc->config->pixel_multiplier) {
+       switch (crtc_state->pixel_multiplier) {
        default:
                WARN(1, "unknown pixel multiplier specified\n");
        case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1275,7 +1273,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
                /* The real mode polarity is set by the SDVO commands, using
                 * struct intel_sdvo_dtd. */
                sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-               if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+               if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
                        sdvox |= HDMI_COLOR_RANGE_16_235;
                if (INTEL_INFO(dev)->gen < 5)
                        sdvox |= SDVO_BORDER_ENABLE;
@@ -1301,7 +1299,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                /* done in crtc_mode_set as it lives inside the dpll register */
        } else {
-               sdvox |= (crtc->config->pixel_multiplier - 1)
+               sdvox |= (crtc_state->pixel_multiplier - 1)
                        << SDVO_PORT_MULTIPLY_SHIFT;
        }
 
@@ -1434,7 +1432,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
             pipe_config->pixel_multiplier, encoder_pixel_multiplier);
 }
 
-static void intel_disable_sdvo(struct intel_encoder *encoder)
+static void intel_disable_sdvo(struct intel_encoder *encoder,
+                              struct intel_crtc_state *old_crtc_state,
+                              struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@@ -1477,16 +1477,22 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
        }
 }
 
-static void pch_disable_sdvo(struct intel_encoder *encoder)
+static void pch_disable_sdvo(struct intel_encoder *encoder,
+                            struct intel_crtc_state *old_crtc_state,
+                            struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *old_crtc_state,
+                                 struct drm_connector_state *old_conn_state)
 {
-       intel_disable_sdvo(encoder);
+       intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_sdvo(struct intel_encoder *encoder)
+static void intel_enable_sdvo(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config,
+                             struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2930,10 +2936,12 @@ static bool
 intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
                          struct drm_device *dev)
 {
+       struct pci_dev *pdev = dev->pdev;
+
        sdvo->ddc.owner = THIS_MODULE;
        sdvo->ddc.class = I2C_CLASS_DDC;
        snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
-       sdvo->ddc.dev.parent = &dev->pdev->dev;
+       sdvo->ddc.dev.parent = &pdev->dev;
        sdvo->ddc.algo_data = sdvo;
        sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
 
index 366900d..73a521f 100644 (file)
@@ -203,6 +203,9 @@ skl_update_plane(struct drm_plane *drm_plane,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(drm_plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
+       const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+       struct drm_crtc *crtc = crtc_state->base.crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const int pipe = intel_plane->pipe;
        const int plane = intel_plane->plane + 1;
        u32 plane_ctl;
@@ -228,6 +231,9 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        plane_ctl |= skl_plane_ctl_rotation(rotation);
 
+       if (wm->dirty_pipes & drm_crtc_mask(crtc))
+               skl_write_plane_wm(intel_crtc, wm, plane);
+
        if (key->flags) {
                I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
                I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -286,6 +292,14 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
        const int pipe = intel_plane->pipe;
        const int plane = intel_plane->plane + 1;
 
+       /*
+        * We only populate skl_results on watermark updates, and if the
+        * plane's visibility isn't actually changing, neither are its watermarks.
+        */
+       if (!dplane->state->visible)
+               skl_write_plane_wm(to_intel_crtc(crtc),
+                                  &dev_priv->wm.skl_results, plane);
+
        I915_WRITE(PLANE_CTL(pipe, plane), 0);
 
        I915_WRITE(PLANE_SURF(pipe, plane), 0);
index 49136ad..d960e48 100644 (file)
@@ -838,7 +838,9 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 }
 
 static void
-intel_enable_tv(struct intel_encoder *encoder)
+intel_enable_tv(struct intel_encoder *encoder,
+               struct intel_crtc_state *pipe_config,
+               struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -851,7 +853,9 @@ intel_enable_tv(struct intel_encoder *encoder)
 }
 
 static void
-intel_disable_tv(struct intel_encoder *encoder)
+intel_disable_tv(struct intel_encoder *encoder,
+                struct intel_crtc_state *old_crtc_state,
+                struct drm_connector_state *old_conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -908,7 +912,8 @@ intel_tv_get_config(struct intel_encoder *encoder,
 
 static bool
 intel_tv_compute_config(struct intel_encoder *encoder,
-                       struct intel_crtc_state *pipe_config)
+                       struct intel_crtc_state *pipe_config,
+                       struct drm_connector_state *conn_state)
 {
        struct intel_tv *intel_tv = enc_to_tv(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1010,7 +1015,9 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
                   color_conversion->av);
 }
 
-static void intel_tv_pre_enable(struct intel_encoder *encoder)
+static void intel_tv_pre_enable(struct intel_encoder *encoder,
+                               struct intel_crtc_state *pipe_config,
+                               struct drm_connector_state *conn_state)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
index 43f8339..a9b6c93 100644 (file)
@@ -1018,11 +1018,9 @@ gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 __gen5_write(8)
 __gen5_write(16)
 __gen5_write(32)
-__gen5_write(64)
 __gen2_write(8)
 __gen2_write(16)
 __gen2_write(32)
-__gen2_write(64)
 
 #undef __gen5_write
 #undef __gen2_write
@@ -1112,23 +1110,18 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
 __gen9_write(8)
 __gen9_write(16)
 __gen9_write(32)
-__gen9_write(64)
 __chv_write(8)
 __chv_write(16)
 __chv_write(32)
-__chv_write(64)
 __gen8_write(8)
 __gen8_write(16)
 __gen8_write(32)
-__gen8_write(64)
 __hsw_write(8)
 __hsw_write(16)
 __hsw_write(32)
-__hsw_write(64)
 __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
-__gen6_write(64)
 
 #undef __gen9_write
 #undef __chv_write
@@ -1158,7 +1151,6 @@ static void vgpu_write##x(struct drm_i915_private *dev_priv, \
 __vgpu_write(8)
 __vgpu_write(16)
 __vgpu_write(32)
-__vgpu_write(64)
 
 #undef __vgpu_write
 #undef VGPU_WRITE_FOOTER
@@ -1169,7 +1161,6 @@ do { \
        dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
        dev_priv->uncore.funcs.mmio_writew = x##_write16; \
        dev_priv->uncore.funcs.mmio_writel = x##_write32; \
-       dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
 } while (0)
 
 #define ASSIGN_READ_MMIO_VFUNCS(x) \
@@ -1597,8 +1588,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
        if (engine_mask == ALL_ENGINES) {
                hw_mask = GEN6_GRDOM_FULL;
        } else {
+               unsigned int tmp;
+
                hw_mask = 0;
-               for_each_engine_masked(engine, dev_priv, engine_mask)
+               for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                        hw_mask |= hw_engine_mask[engine->id];
        }
 
@@ -1714,15 +1707,16 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
                              unsigned engine_mask)
 {
        struct intel_engine_cs *engine;
+       unsigned int tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask)
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                if (gen8_request_engine_reset(engine))
                        goto not_ready;
 
        return gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
-       for_each_engine_masked(engine, dev_priv, engine_mask)
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                gen8_unrequest_engine_reset(engine);
 
        return -EIO;
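for_each_engine_masked() now takes a fourth argument supplying scratch storage for the mask walk. An illustrative caller; the DRM_DEBUG body is a stand-in:

static void example_walk_engines(struct drm_i915_private *dev_priv,
				 unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		DRM_DEBUG("engine %d selected\n", engine->id);
}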
index 56dfc4c..98df09c 100644 (file)
@@ -64,25 +64,6 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
        drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 }
 
-static int imx_drm_driver_unload(struct drm_device *drm)
-{
-       struct imx_drm_device *imxdrm = drm->dev_private;
-
-       drm_kms_helper_poll_fini(drm);
-
-       if (imxdrm->fbhelper)
-               drm_fbdev_cma_fini(imxdrm->fbhelper);
-
-       component_unbind_all(drm->dev, drm);
-
-       drm_vblank_cleanup(drm);
-       drm_mode_config_cleanup(drm);
-
-       platform_set_drvdata(drm->platformdev, NULL);
-
-       return 0;
-}
-
 static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
@@ -146,55 +127,73 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
        drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
+static int imx_drm_atomic_check(struct drm_device *dev,
+                               struct drm_atomic_state *state)
+{
+       int ret;
+
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       ret = drm_atomic_helper_check_planes(dev, state);
+       if (ret)
+               return ret;
+
+       /*
+        * Check modeset again in case crtc_state->mode_changed was
+        * updated in a plane's ->atomic_check callback.
+        */
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+static int imx_drm_atomic_commit(struct drm_device *dev,
+                                struct drm_atomic_state *state,
+                                bool nonblock)
+{
+       struct drm_plane_state *plane_state;
+       struct drm_plane *plane;
+       struct dma_buf *dma_buf;
+       int i;
+
+       /*
+        * If the plane fb has a dma-buf attached, fish out the exclusive
+        * fence for the atomic helper to wait on.
+        */
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+                       dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
+                                                        0)->base.dma_buf;
+                       if (!dma_buf)
+                               continue;
+                       plane_state->fence =
+                               reservation_object_get_excl_rcu(dma_buf->resv);
+               }
+       }
+
+       return drm_atomic_helper_commit(dev, state, nonblock);
+}
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,
        .output_poll_changed = imx_drm_output_poll_changed,
-       .atomic_check = drm_atomic_helper_check,
-       .atomic_commit = drm_atomic_helper_commit,
+       .atomic_check = imx_drm_atomic_check,
+       .atomic_commit = imx_drm_atomic_commit,
 };
 
 static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *crtc_state;
-       struct drm_plane_state *plane_state;
-       struct drm_gem_cma_object *cma_obj;
-       struct fence *excl;
-       unsigned shared_count;
-       struct fence **shared;
-       unsigned int i, j;
-       int ret;
-
-       /* Wait for fences. */
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               plane_state = crtc->primary->state;
-               if (plane_state->fb) {
-                       cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
-                       if (cma_obj->base.dma_buf) {
-                               ret = reservation_object_get_fences_rcu(
-                                       cma_obj->base.dma_buf->resv, &excl,
-                                       &shared_count, &shared);
-                               if (unlikely(ret))
-                                       DRM_ERROR("failed to get fences "
-                                                 "for buffer\n");
-
-                               if (excl) {
-                                       fence_wait(excl, false);
-                                       fence_put(excl);
-                               }
-                               for (j = 0; j < shared_count; i++) {
-                                       fence_wait(shared[j], false);
-                                       fence_put(shared[j]);
-                               }
-                       }
-               }
-       }
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
        drm_atomic_helper_commit_planes(dev, state,
-                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
+                               DRM_PLANE_COMMIT_ACTIVE_ONLY |
+                               DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -209,111 +208,6 @@ static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
        .atomic_commit_tail = imx_drm_atomic_commit_tail,
 };
 
-/*
- * Main DRM initialisation. This binds, initialises and registers
- * with DRM the subcomponents of the driver.
- */
-static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
-{
-       struct imx_drm_device *imxdrm;
-       struct drm_connector *connector;
-       int ret;
-
-       imxdrm = devm_kzalloc(drm->dev, sizeof(*imxdrm), GFP_KERNEL);
-       if (!imxdrm)
-               return -ENOMEM;
-
-       imxdrm->drm = drm;
-
-       drm->dev_private = imxdrm;
-
-       /*
-        * enable drm irq mode.
-        * - with irq_enabled = true, we can use the vblank feature.
-        *
-        * P.S. note that we wouldn't use drm irq handler but
-        *      just specific driver own one instead because
-        *      drm framework supports only one irq handler and
-        *      drivers can well take care of their interrupts
-        */
-       drm->irq_enabled = true;
-
-       /*
-        * set max width and height as default value(4096x4096).
-        * this value would be used to check framebuffer size limitation
-        * at drm_mode_addfb().
-        */
-       drm->mode_config.min_width = 64;
-       drm->mode_config.min_height = 64;
-       drm->mode_config.max_width = 4096;
-       drm->mode_config.max_height = 4096;
-       drm->mode_config.funcs = &imx_drm_mode_config_funcs;
-       drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
-
-       drm_mode_config_init(drm);
-
-       ret = drm_vblank_init(drm, MAX_CRTC);
-       if (ret)
-               goto err_kms;
-
-       platform_set_drvdata(drm->platformdev, drm);
-
-       /* Now try and bind all our sub-components */
-       ret = component_bind_all(drm->dev, drm);
-       if (ret)
-               goto err_vblank;
-
-       /*
-        * All components are now added, we can publish the connector sysfs
-        * entries to userspace.  This will generate hotplug events and so
-        * userspace will expect to be able to access DRM at this point.
-        */
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               ret = drm_connector_register(connector);
-               if (ret) {
-                       dev_err(drm->dev,
-                               "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
-                               connector->base.id,
-                               connector->name, ret);
-                       goto err_unbind;
-               }
-       }
-
-       drm_mode_config_reset(drm);
-
-       /*
-        * All components are now initialised, so setup the fb helper.
-        * The fb helper takes copies of key hardware information, so the
-        * crtcs/connectors/encoders must not change after this point.
-        */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
-       if (legacyfb_depth != 16 && legacyfb_depth != 32) {
-               dev_warn(drm->dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
-               legacyfb_depth = 16;
-       }
-       imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
-                               drm->mode_config.num_crtc, MAX_CRTC);
-       if (IS_ERR(imxdrm->fbhelper)) {
-               ret = PTR_ERR(imxdrm->fbhelper);
-               imxdrm->fbhelper = NULL;
-               goto err_unbind;
-       }
-#endif
-
-       drm_kms_helper_poll_init(drm);
-
-       return 0;
-
-err_unbind:
-       component_unbind_all(drm->dev, drm);
-err_vblank:
-       drm_vblank_cleanup(drm);
-err_kms:
-       drm_mode_config_cleanup(drm);
-
-       return ret;
-}
-
 /*
  * imx_drm_add_crtc - add a new crtc
  */
@@ -406,8 +300,6 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
 static struct drm_driver imx_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                                  DRIVER_ATOMIC,
-       .load                   = imx_drm_driver_load,
-       .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
        .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
@@ -460,12 +352,122 @@ static int compare_of(struct device *dev, void *data)
 
 static int imx_drm_bind(struct device *dev)
 {
-       return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
+       struct drm_device *drm;
+       struct imx_drm_device *imxdrm;
+       int ret;
+
+       drm = drm_dev_alloc(&imx_drm_driver, dev);
+       if (!drm)
+               return -ENOMEM;
+
+       imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
+       if (!imxdrm) {
+               ret = -ENOMEM;
+               goto err_unref;
+       }
+
+       imxdrm->drm = drm;
+       drm->dev_private = imxdrm;
+
+       /*
+        * Enable DRM irq mode.
+        * - with irq_enabled = true, we can use the vblank feature.
+        *
+        * Note that we don't use the DRM-provided irq handler; each
+        *      driver registers its own instead, since the DRM framework
+        *      supports only one irq handler and the drivers can manage
+        *      their own interrupts.
+        */
+       drm->irq_enabled = true;
+
+       /*
+        * Set the max width and height to default values (4096x4096).
+        * These values are used to check the framebuffer size limit
+        * in drm_mode_addfb().
+        */
+       drm->mode_config.min_width = 64;
+       drm->mode_config.min_height = 64;
+       drm->mode_config.max_width = 4096;
+       drm->mode_config.max_height = 4096;
+       drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+       drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+
+       drm_mode_config_init(drm);
+
+       ret = drm_vblank_init(drm, MAX_CRTC);
+       if (ret)
+               goto err_kms;
+
+       dev_set_drvdata(dev, drm);
+
+       /* Now try and bind all our sub-components */
+       ret = component_bind_all(dev, drm);
+       if (ret)
+               goto err_vblank;
+
+       drm_mode_config_reset(drm);
+
+       /*
+        * All components are now initialised, so setup the fb helper.
+        * The fb helper takes copies of key hardware information, so the
+        * crtcs/connectors/encoders must not change after this point.
+        */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
+       if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+               dev_warn(dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
+               legacyfb_depth = 16;
+       }
+       imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
+                               drm->mode_config.num_crtc, MAX_CRTC);
+       if (IS_ERR(imxdrm->fbhelper)) {
+               ret = PTR_ERR(imxdrm->fbhelper);
+               imxdrm->fbhelper = NULL;
+               goto err_unbind;
+       }
+#endif
+
+       drm_kms_helper_poll_init(drm);
+
+       ret = drm_dev_register(drm, 0);
+       if (ret)
+               goto err_fbhelper;
+
+       return 0;
+
+err_fbhelper:
+       drm_kms_helper_poll_fini(drm);
+       if (imxdrm->fbhelper)
+               drm_fbdev_cma_fini(imxdrm->fbhelper);
+err_unbind:
+       component_unbind_all(drm->dev, drm);
+err_vblank:
+       drm_vblank_cleanup(drm);
+err_kms:
+       drm_mode_config_cleanup(drm);
+err_unref:
+       drm_dev_unref(drm);
+
+       return ret;
 }
 
 static void imx_drm_unbind(struct device *dev)
 {
-       drm_put_dev(dev_get_drvdata(dev));
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct imx_drm_device *imxdrm = drm->dev_private;
+
+       drm_dev_unregister(drm);
+
+       drm_kms_helper_poll_fini(drm);
+
+       if (imxdrm->fbhelper)
+               drm_fbdev_cma_fini(imxdrm->fbhelper);
+
+       drm_mode_config_cleanup(drm);
+
+       component_unbind_all(drm->dev, drm);
+       dev_set_drvdata(dev, NULL);
+
+       drm_dev_unref(drm);
 }
 
 static const struct component_master_ops imx_drm_ops = {
index 4eed3a6..3ce391c 100644 (file)
@@ -757,11 +757,10 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
        for (i = 0; i < 2; i++) {
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
-               if (!channel->connector.funcs)
-                       continue;
-
-               channel->connector.funcs->destroy(&channel->connector);
-               channel->encoder.funcs->destroy(&channel->encoder);
+               if (channel->bridge)
+                       drm_bridge_detach(channel->bridge);
+               if (channel->panel)
+                       drm_panel_detach(channel->panel);
 
                kfree(channel->edid);
                i2c_put_adapter(channel->ddc);
index 5e87594..8fc0888 100644 (file)
@@ -685,9 +685,6 @@ static void imx_tve_unbind(struct device *dev, struct device *master,
 {
        struct imx_tve *tve = dev_get_drvdata(dev);
 
-       tve->connector.funcs->destroy(&tve->connector);
-       tve->encoder.funcs->destroy(&tve->encoder);
-
        if (!IS_ERR(tve->dac_reg))
                regulator_disable(tve->dac_reg);
 }
index 6e1dc90..9df29f1 100644 (file)
@@ -60,7 +60,8 @@ static void ipu_crtc_enable(struct drm_crtc *crtc)
        ipu_di_enable(ipu_crtc->di);
 }
 
-static void ipu_crtc_disable(struct drm_crtc *crtc)
+static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
 {
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
@@ -75,6 +76,9 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);
+
+       /* always disable planes on the CRTC */
+       drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
 }
 
 static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -120,9 +124,14 @@ static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
        kfree(to_imx_crtc_state(state));
 }
 
+static void imx_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+       imx_drm_remove_crtc(to_ipu_crtc(crtc)->imx_crtc);
+}
+
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
-       .destroy = drm_crtc_cleanup,
+       .destroy = imx_drm_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = imx_drm_crtc_reset,
        .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -241,7 +250,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
        .mode_set_nofb = ipu_crtc_mode_set_nofb,
        .atomic_check = ipu_crtc_atomic_check,
        .atomic_begin = ipu_crtc_atomic_begin,
-       .disable = ipu_crtc_disable,
+       .atomic_disable = ipu_crtc_atomic_disable,
        .enable = ipu_crtc_enable,
 };
 
@@ -409,8 +418,6 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
 {
        struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
 
-       imx_drm_remove_crtc(ipu_crtc->imx_crtc);
-
        ipu_put_resources(ipu_crtc);
        if (ipu_crtc->plane[1])
                ipu_plane_put_resources(ipu_crtc->plane[1]);
index 4ad67d0..ce22d0a 100644 (file)
@@ -213,8 +213,12 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
                ipu_dp_enable_channel(ipu_plane->dp);
 }
 
-static void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static int ipu_disable_plane(struct drm_plane *plane)
 {
+       struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
        ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
 
        if (ipu_plane->dp)
@@ -223,15 +227,6 @@ static void ipu_plane_disable(struct ipu_plane *ipu_plane)
        ipu_dmfc_disable_channel(ipu_plane->dmfc);
        if (ipu_plane->dp)
                ipu_dp_disable(ipu_plane->ipu);
-}
-
-static int ipu_disable_plane(struct drm_plane *plane)
-{
-       struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-       ipu_plane_disable(ipu_plane);
 
        return 0;
 }
@@ -242,7 +237,6 @@ static void ipu_plane_destroy(struct drm_plane *plane)
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       ipu_disable_plane(plane);
        drm_plane_cleanup(plane);
        kfree(ipu_plane);
 }
@@ -319,13 +313,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        /*
-        * since we cannot touch active IDMAC channels, we do not support
-        * resizing the enabled plane or changing its format
+        * We support resizing an active plane or changing its format by
+        * forcing a CRTC mode change in the plane's ->atomic_check callback
+        * and disabling all affected active planes in the CRTC's ->atomic_disable
+        * callback.  The planes are then re-enabled in the plane's
+        * ->atomic_update callback.
         */
        if (old_fb && (state->src_w != old_state->src_w ||
                              state->src_h != old_state->src_h ||
                              fb->pixel_format != old_fb->pixel_format))
-               return -EINVAL;
+               crtc_state->mode_changed = true;
 
        eba = drm_plane_state_to_eba(state);
 
@@ -336,7 +333,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        if (old_fb && fb->pitches[0] != old_fb->pitches[0])
-               return -EINVAL;
+               crtc_state->mode_changed = true;
 
        switch (fb->pixel_format) {
        case DRM_FORMAT_YUV420:
@@ -372,7 +369,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                        return -EINVAL;
 
                if (old_fb && old_fb->pitches[1] != fb->pitches[1])
-                       return -EINVAL;
+                       crtc_state->mode_changed = true;
        }
 
        return 0;
@@ -392,8 +389,12 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        enum ipu_color_space ics;
 
        if (old_state->fb) {
-               ipu_plane_atomic_set_base(ipu_plane, old_state);
-               return;
+               struct drm_crtc_state *crtc_state = state->crtc->state;
+
+               if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
+                       ipu_plane_atomic_set_base(ipu_plane, old_state);
+                       return;
+               }
        }
 
        switch (ipu_plane->dp_flow) {
index 74b0ac0..d796ada 100644 (file)
@@ -293,8 +293,10 @@ static void imx_pd_unbind(struct device *dev, struct device *master,
 {
        struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
 
-       imxpd->encoder.funcs->destroy(&imxpd->encoder);
-       imxpd->connector.funcs->destroy(&imxpd->connector);
+       if (imxpd->bridge)
+               drm_bridge_detach(imxpd->bridge);
+       if (imxpd->panel)
+               drm_panel_detach(imxpd->panel);
 
        kfree(imxpd->edid);
 }
index 72c1ae4..cf83f65 100644 (file)
@@ -294,8 +294,8 @@ static int mtk_drm_bind(struct device *dev)
        int ret;
 
        drm = drm_dev_alloc(&mtk_drm_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        drm->dev_private = private;
        private->drm = drm;
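drm_dev_alloc() now returns an ERR_PTR() rather than NULL on failure, so callers propagate the encoded error, as in the mtk, msm and nouveau hunks here. An illustrative bind path; example_driver is a stand-in declaration, not part of this patch:

static struct drm_driver example_driver;	/* stand-in */

static int example_bind(struct device *parent)
{
	struct drm_device *drm;

	drm = drm_dev_alloc(&example_driver, parent);
	if (IS_ERR(drm))
		return PTR_ERR(drm);	/* ERR_PTR, never NULL, on failure */

	/* ... drm_dev_register() and the rest of setup follow ... */
	return 0;
}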
index 68268e5..919b35f 100644 (file)
@@ -150,7 +150,8 @@ static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *f
 {
        struct mgag200_bo *mgabo = mgag200_bo(bo);
 
-       return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&mgabo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
index 5df252c..73bae38 100644 (file)
@@ -112,7 +112,7 @@ static void complete_commit(struct msm_commit *c, bool async)
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
 
-       drm_atomic_helper_wait_for_fences(dev, state);
+       drm_atomic_helper_wait_for_fences(dev, state, false);
 
        kms->funcs->prepare_commit(kms, state);
 
index 46419be..fb5c0b0 100644 (file)
@@ -348,9 +348,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        int ret;
 
        ddev = drm_dev_alloc(drv, dev);
-       if (!ddev) {
+       if (IS_ERR(ddev)) {
                dev_err(dev, "failed to allocate drm_device\n");
-               return -ENOMEM;
+               return PTR_ERR(ddev);
        }
 
        platform_set_drvdata(pdev, ddev);
index 8ab9ce5..66f31c3 100644 (file)
@@ -1315,7 +1315,8 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-       return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
+       return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+                                         filp->private_data);
 }
 
 static int
index 652ab11..3100fd8 100644 (file)
@@ -1067,8 +1067,8 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
                goto err_free;
 
        drm = drm_dev_alloc(&driver_platform, &pdev->dev);
-       if (!drm) {
-               err = -ENOMEM;
+       if (IS_ERR(drm)) {
+               err = PTR_ERR(drm);
                goto err_free;
        }
 
index ffe8853..9b728ed 100644 (file)
@@ -57,11 +57,8 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
 static int
 alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
 {
-       int ret;
-       ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
-                                        QXL_RELEASE_DRAWABLE, release,
-                                        NULL);
-       return ret;
+       return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+                                         QXL_RELEASE_DRAWABLE, release, NULL);
 }
 
 static void
index 5e1d789..fa5440d 100644 (file)
@@ -61,7 +61,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
        if (domain == QXL_GEM_DOMAIN_VRAM)
                qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
        if (domain == QXL_GEM_DOMAIN_SURFACE)
-               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+               qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
        if (domain == QXL_GEM_DOMAIN_CPU)
                qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
        if (!c)
@@ -151,7 +151,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 
        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
-       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
                map = qdev->surface_mapping;
        else
                goto fallback;
@@ -191,7 +191,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 
        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
-       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+       else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
                map = qdev->surface_mapping;
        else
                goto fallback;
@@ -311,7 +311,7 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
 
 int qxl_surf_evict(struct qxl_device *qdev)
 {
-       return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+       return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
 }
 
 int qxl_vram_evict(struct qxl_device *qdev)
index f599cd0..cd83f05 100644 (file)
@@ -203,12 +203,9 @@ qxl_release_free(struct qxl_device *qdev,
 static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
 {
-       int ret;
        /* pin release bo's; they are too messy to evict */
-       ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
-                           QXL_GEM_DOMAIN_VRAM, NULL,
-                           bo);
-       return ret;
+       return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+                            QXL_GEM_DOMAIN_VRAM, NULL, bo);
 }
 
 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
index 6a22de0..e26c82d 100644 (file)
@@ -168,7 +168,7 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
-       case TTM_PL_PRIV0:
+       case TTM_PL_PRIV:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
@@ -210,7 +210,8 @@ static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct qxl_bo *qbo = to_qxl_bo(bo);
 
-       return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
+       return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
+                                         filp->private_data);
 }
 
 static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -235,7 +236,7 @@ static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                mem->bus.base = qdev->vram_base;
                mem->bus.offset = mem->start << PAGE_SHIFT;
                break;
-       case TTM_PL_PRIV0:
+       case TTM_PL_PRIV:
                mem->bus.is_iomem = true;
                mem->bus.base = qdev->surfaceram_base;
                mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -376,7 +377,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
        qbo = to_qxl_bo(bo);
        qdev = qbo->gem_base.dev->dev_private;
 
-       if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+       if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
                qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }
 
@@ -422,7 +423,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
-       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+       r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
                           qdev->surfaceram_size / PAGE_SIZE);
        if (r) {
                DRM_ERROR("Failed initializing Surfaces heap.\n");
@@ -445,7 +446,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
 void qxl_ttm_fini(struct qxl_device *qdev)
 {
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
-       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+       ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
        ttm_bo_device_release(&qdev->mman.bdev);
        qxl_ttm_global_fini(qdev);
        DRM_INFO("qxl: ttm finalized\n");
@@ -489,7 +490,7 @@ static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
                if (i == 0)
                        qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
                else
-                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+                       qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
 
        }
        return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
index b1784a1..f6ff41a 100644 (file)
@@ -4193,11 +4193,7 @@ u32 cik_gfx_get_rptr(struct radeon_device *rdev,
 u32 cik_gfx_get_wptr(struct radeon_device *rdev,
                     struct radeon_ring *ring)
 {
-       u32 wptr;
-
-       wptr = RREG32(CP_RB0_WPTR);
-
-       return wptr;
+       return RREG32(CP_RB0_WPTR);
 }
 
 void cik_gfx_set_wptr(struct radeon_device *rdev,
index f25994b..f5e84f4 100644 (file)
@@ -1071,11 +1071,7 @@ u32 r100_gfx_get_rptr(struct radeon_device *rdev,
 u32 r100_gfx_get_wptr(struct radeon_device *rdev,
                      struct radeon_ring *ring)
 {
-       u32 wptr;
-
-       wptr = RREG32(RADEON_CP_RB_WPTR);
-
-       return wptr;
+       return RREG32(RADEON_CP_RB_WPTR);
 }
 
 void r100_gfx_set_wptr(struct radeon_device *rdev,
index 6406536..a951881 100644 (file)
@@ -2631,11 +2631,7 @@ u32 r600_gfx_get_rptr(struct radeon_device *rdev,
 u32 r600_gfx_get_wptr(struct radeon_device *rdev,
                      struct radeon_ring *ring)
 {
-       u32 wptr;
-
-       wptr = RREG32(R600_CP_RB_WPTR);
-
-       return wptr;
+       return RREG32(R600_CP_RB_WPTR);
 }
 
 void r600_gfx_set_wptr(struct radeon_device *rdev,
index a00dd2f..b423c01 100644 (file)
@@ -639,7 +639,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
  * Used at driver startup.
  * Returns true if virtual or false if not.
  */
-static bool radeon_device_is_virtual(void)
+bool radeon_device_is_virtual(void)
 {
 #ifdef CONFIG_X86
        return boot_cpu_has(X86_FEATURE_HYPERVISOR);
@@ -1594,7 +1594,8 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 
        rdev = dev->dev_private;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+           dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
                return 0;
 
        drm_kms_helper_poll_disable(dev);
@@ -1689,7 +1690,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        struct drm_crtc *crtc;
        int r;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+           dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
                return 0;
 
        if (fbcon) {
@@ -1956,14 +1958,3 @@ static void radeon_debugfs_remove_files(struct radeon_device *rdev)
        }
 #endif
 }
-
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor)
-{
-       return 0;
-}
-
-void radeon_debugfs_cleanup(struct drm_minor *minor)
-{
-}
-#endif
index 07e4493..78367ba 100644 (file)
@@ -156,11 +156,6 @@ void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
                                    unsigned long arg);
 
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor);
-void radeon_debugfs_cleanup(struct drm_minor *minor);
-#endif
-
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
@@ -311,6 +306,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static struct drm_driver kms_driver;
 
+bool radeon_device_is_virtual(void);
+
 static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 {
        struct apertures_struct *ap;
@@ -364,6 +361,16 @@ radeon_pci_remove(struct pci_dev *pdev)
        drm_put_dev(dev);
 }
 
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+       /* if we are running in a VM, make sure the device
+        * is torn down properly on reboot/shutdown
+        */
+       if (radeon_device_is_virtual())
+               radeon_pci_remove(pdev);
+}
+
 static int radeon_pmops_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -375,6 +382,14 @@ static int radeon_pmops_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       /* GPU comes up enabled by the bios on resume */
+       if (radeon_is_px(drm_dev)) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
+
        return radeon_resume_kms(drm_dev, true, true);
 }
 
@@ -531,10 +546,6 @@ static struct drm_driver kms_driver = {
        .disable_vblank = radeon_disable_vblank_kms,
        .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
        .get_scanout_position = radeon_get_crtc_scanoutpos,
-#if defined(CONFIG_DEBUG_FS)
-       .debugfs_init = radeon_debugfs_init,
-       .debugfs_cleanup = radeon_debugfs_cleanup,
-#endif
        .irq_preinstall = radeon_driver_irq_preinstall_kms,
        .irq_postinstall = radeon_driver_irq_postinstall_kms,
        .irq_uninstall = radeon_driver_irq_uninstall_kms,
@@ -576,6 +587,7 @@ static struct pci_driver radeon_kms_pci_driver = {
        .id_table = pciidlist,
        .probe = radeon_pci_probe,
        .remove = radeon_pci_remove,
+       .shutdown = radeon_pci_shutdown,
        .driver.pm = &radeon_pm_ops,
 };
 
index 568e036..0daad44 100644
@@ -25,6 +25,7 @@
  */
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
@@ -46,8 +47,35 @@ struct radeon_fbdev {
        struct radeon_device *rdev;
 };
 
+static int
+radeonfb_open(struct fb_info *info, int user)
+{
+       struct radeon_fbdev *rfbdev = info->par;
+       struct radeon_device *rdev = rfbdev->rdev;
+       int ret = pm_runtime_get_sync(rdev->ddev->dev);
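+       /* -EACCES only means runtime PM is disabled, so not an error here */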
+       if (ret < 0 && ret != -EACCES) {
+               pm_runtime_mark_last_busy(rdev->ddev->dev);
+               pm_runtime_put_autosuspend(rdev->ddev->dev);
+               return ret;
+       }
+       return 0;
+}
+
+static int
+radeonfb_release(struct fb_info *info, int user)
+{
+       struct radeon_fbdev *rfbdev = info->par;
+       struct radeon_device *rdev = rfbdev->rdev;
+
+       pm_runtime_mark_last_busy(rdev->ddev->dev);
+       pm_runtime_put_autosuspend(rdev->ddev->dev);
+       return 0;
+}
+
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
+       .fb_open = radeonfb_open,
+       .fb_release = radeonfb_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
index 835563c..4388dde 100644
@@ -641,11 +641,11 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        if (rdev->family >= CHIP_CAYMAN) {
                struct radeon_fpriv *fpriv;
                struct radeon_vm *vm;
-               int r;
 
                fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
                if (unlikely(!fpriv)) {
-                       return -ENOMEM;
+                       r = -ENOMEM;
+                       goto out_suspend;
                }
 
                if (rdev->accel_working) {
@@ -653,14 +653,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        r = radeon_vm_init(rdev, vm);
                        if (r) {
                                kfree(fpriv);
-                               return r;
+                               goto out_suspend;
                        }
 
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
                                radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
-                               return r;
+                               goto out_suspend;
                        }
 
                        /* map the ib pool buffer read only into
@@ -674,15 +674,16 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        if (r) {
                                radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
-                               return r;
+                               goto out_suspend;
                        }
                }
                file_priv->driver_priv = fpriv;
        }
 
+out_suspend:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
-       return 0;
+       return r;
 }
 
 /**
@@ -717,6 +718,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 }
 
 /**
@@ -733,6 +736,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
 {
        struct radeon_device *rdev = dev->dev_private;
 
+       pm_runtime_get_sync(dev->dev);
+
        mutex_lock(&rdev->gem.mutex);
        if (rdev->hyperz_filp == file_priv)
                rdev->hyperz_filp = NULL;
index 93414ac..27ee0ab 100644
@@ -237,7 +237,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
        if (radeon_ttm_tt_has_userptr(bo->ttm))
                return -EPERM;
-       return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+       return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+                                         filp->private_data);
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,
index 899ef7a..73c971e 100644
@@ -316,8 +316,8 @@ static int rcar_du_probe(struct platform_device *pdev)
        rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
 
        ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
-       if (!ddev)
-               return -ENOMEM;
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
 
        rcdu->ddev = ddev;
        ddev->dev_private = rcdu;
index 76eaf1d..446b5d7 100644
@@ -143,8 +143,8 @@ static int rockchip_drm_bind(struct device *dev)
        int ret;
 
        drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
-       if (!drm_dev)
-               return -ENOMEM;
+       if (IS_ERR(drm_dev))
+               return PTR_ERR(drm_dev);
 
        dev_set_drvdata(dev, drm_dev);
 
index 7cd3804..49ed3c4 100644
@@ -365,8 +365,8 @@ static int sti_bind(struct device *dev)
        int ret;
 
        ddev = drm_dev_alloc(&sti_driver, dev);
-       if (!ddev)
-               return -ENOMEM;
+       if (IS_ERR(ddev))
+               return PTR_ERR(ddev);
 
        ddev->platformdev = to_platform_device(dev);
 
index 9059e3e..0da9862 100644
@@ -121,8 +121,8 @@ static int sun4i_drv_bind(struct device *dev)
        int ret;
 
        drm = drm_dev_alloc(&sun4i_drv_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
        if (!drv) {
index 4b9f1c7..8ab47b5 100644
@@ -983,8 +983,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
        int err;
 
        drm = drm_dev_alloc(driver, &dev->dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
 
        dev_set_drvdata(&dev->dev, drm);
 
index 2087689..cb9df10 100644
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "drm_flip_work.h"
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_plane_helper.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
index c2a30bd..fc6217d 100644
@@ -57,14 +57,14 @@ static struct attribute ttm_bo_count = {
 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
                                          uint32_t *mem_type)
 {
-       int i;
+       int pos;
 
-       for (i = 0; i <= TTM_PL_PRIV5; i++)
-               if (place->flags & (1 << i)) {
-                       *mem_type = i;
-                       return 0;
-               }
-       return -EINVAL;
+       pos = ffs(place->flags & TTM_PL_MASK_MEM);
+       if (unlikely(!pos))
+               return -EINVAL;
+
+       *mem_type = pos - 1;
+       return 0;
 }
 
 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
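The rework above replaces the open-coded loop over TTM_PL_PRIV0..TTM_PL_PRIV5 with an ffs() lookup on the masked placement flags. A minimal standalone sketch of the same bit-to-index mapping (plain userspace C; the 0xffff mask is a stand-in for TTM_PL_MASK_MEM, and the names are illustrative only):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/*
 * ffs() returns the 1-based position of the least significant set bit
 * (0 if none is set), so ffs(flags & mask) - 1 recovers the memory-type
 * index that was encoded as a single flag bit.
 */
static int mem_type_from_flags(unsigned int flags, unsigned int mask,
			       unsigned int *mem_type)
{
	int pos = ffs(flags & mask);

	if (!pos)
		return -1;	/* no placement bit set within the mask */
	*mem_type = (unsigned int)(pos - 1);
	return 0;
}

int main(void)
{
	unsigned int type;

	/* a VRAM-style flag (1 << 2) maps back to memory type 2 */
	if (!mem_type_from_flags(1u << 2, 0xffffu, &type))
		printf("mem_type = %u\n", type);
	return 0;
}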
index bef9f6f..cec4b4b 100644
@@ -858,7 +858,6 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                ttm->pages[index] = d_page->p;
-               ttm_dma->cpu_address[index] = d_page->vaddr;
                ttm_dma->dma_address[index] = d_page->dma;
                list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                r = 0;
@@ -989,7 +988,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
        INIT_LIST_HEAD(&ttm_dma->pages_list);
        for (i = 0; i < ttm->num_pages; i++) {
                ttm->pages[i] = NULL;
-               ttm_dma->cpu_address[i] = 0;
                ttm_dma->dma_address[i] = 0;
        }
 
index bc5aa57..aee3c00 100644
@@ -57,10 +57,8 @@ static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
-                                         sizeof(*ttm->dma_address) +
-                                         sizeof(*ttm->cpu_address));
-       ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
-       ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
+                                         sizeof(*ttm->dma_address));
+       ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
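+       /* dma_address[] now sits right after pages[] in the same allocation */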
 }
 
 #ifdef CONFIG_X86
@@ -244,7 +242,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
-       ttm_dma->cpu_address = NULL;
        ttm_dma->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
index f0851db..cc45d98 100644
@@ -86,8 +86,8 @@ static int udl_usb_probe(struct usb_interface *interface,
        int r;
 
        dev = drm_dev_alloc(&driver, &interface->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
 
        r = drm_dev_register(dev, (unsigned long)udev);
        if (r)
index deec535..3c9e7f6 100644
@@ -233,8 +233,8 @@ static int vc4_drm_bind(struct device *dev)
                return -ENOMEM;
 
        drm = drm_dev_alloc(&vc4_drm_driver, dev);
-       if (!drm)
-               return -ENOMEM;
+       if (IS_ERR(drm))
+               return PTR_ERR(drm);
        platform_set_drvdata(pdev, drm);
        vc4->dev = drm;
        drm->dev_private = vc4;
index c15bafb..f36c147 100644
@@ -334,8 +334,8 @@ static int __init vgem_init(void)
        int ret;
 
        vgem_device = drm_dev_alloc(&vgem_driver, NULL);
-       if (!vgem_device) {
-               ret = -ENOMEM;
+       if (IS_ERR(vgem_device)) {
+               ret = PTR_ERR(vgem_device);
                goto out;
        }
 
index a59d0e3..26197dd 100644
@@ -54,8 +54,8 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
        int ret;
 
        dev = drm_dev_alloc(driver, &vdev->dev);
-       if (!dev)
-               return -ENOMEM;
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
        dev->virtdev = vdev;
        vdev->priv = dev;
 
index 74304b0..070d750 100644
                        VMWGFX_NUM_GB_SURFACE +\
                        VMWGFX_NUM_GB_SCREEN_TARGET)
 
-#define VMW_PL_GMR TTM_PL_PRIV0
-#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
-#define VMW_PL_MOB TTM_PL_PRIV1
-#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
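+/* driver-private placements now index off the single TTM_PL_PRIV base */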
 
 #define VMW_RES_CONTEXT ttm_driver_type0
 #define VMW_RES_SURFACE ttm_driver_type1
index 107ec23..5f96141 100644
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
 
 imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
-               ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
+               ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
+               ipu-smfc.o ipu-vdi.o
index d230988..b9539f7 100644
@@ -730,6 +730,137 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
 }
 EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
 
+
+/* Frame Synchronization Unit Channel Linking */
+
+struct fsu_link_reg_info {
+       int chno;
+       u32 reg;
+       u32 mask;
+       u32 val;
+};
+
+struct fsu_link_info {
+       struct fsu_link_reg_info src;
+       struct fsu_link_reg_info sink;
+};
+
+static const struct fsu_link_info fsu_link_info[] = {
+       {
+               .src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
+                         FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
+               .sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
+                         FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
+       }, {
+               .src =  { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
+                         FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
+               .sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
+                         FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
+       }, {
+               .src =  { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
+                         FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
+               .sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
+                         FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
+       }, {
+               .src =  { IPUV3_CHANNEL_CSI_DIRECT, 0 },
+               .sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
+                         FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
+       },
+};
+
+static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
+               if (src == fsu_link_info[i].src.chno &&
+                   sink == fsu_link_info[i].sink.chno)
+                       return &fsu_link_info[i];
+       }
+
+       return NULL;
+}
+
+/*
+ * Links a source channel to a sink channel in the FSU.
+ */
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+       const struct fsu_link_info *link;
+       u32 src_reg, sink_reg;
+       unsigned long flags;
+
+       link = find_fsu_link_info(src_ch, sink_ch);
+       if (!link)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ipu->lock, flags);
+
+       if (link->src.mask) {
+               src_reg = ipu_cm_read(ipu, link->src.reg);
+               src_reg &= ~link->src.mask;
+               src_reg |= link->src.val;
+               ipu_cm_write(ipu, src_reg, link->src.reg);
+       }
+
+       if (link->sink.mask) {
+               sink_reg = ipu_cm_read(ipu, link->sink.reg);
+               sink_reg &= ~link->sink.mask;
+               sink_reg |= link->sink.val;
+               ipu_cm_write(ipu, sink_reg, link->sink.reg);
+       }
+
+       spin_unlock_irqrestore(&ipu->lock, flags);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_link);
+
+/*
+ * Unlinks source and sink channels in the FSU.
+ */
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+       const struct fsu_link_info *link;
+       u32 src_reg, sink_reg;
+       unsigned long flags;
+
+       link = find_fsu_link_info(src_ch, sink_ch);
+       if (!link)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ipu->lock, flags);
+
+       if (link->src.mask) {
+               src_reg = ipu_cm_read(ipu, link->src.reg);
+               src_reg &= ~link->src.mask;
+               ipu_cm_write(ipu, src_reg, link->src.reg);
+       }
+
+       if (link->sink.mask) {
+               sink_reg = ipu_cm_read(ipu, link->sink.reg);
+               sink_reg &= ~link->sink.mask;
+               ipu_cm_write(ipu, sink_reg, link->sink.reg);
+       }
+
+       spin_unlock_irqrestore(&ipu->lock, flags);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
+
+/* Link IDMAC channels in the FSU */
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+       return ipu_fsu_link(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_link);
+
+/* Unlink IDMAC channels in the FSU */
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+       return ipu_fsu_unlink(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
+
 struct ipu_devtype {
        const char *name;
        unsigned long cm_ofs;
@@ -839,6 +970,20 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
                goto err_ic;
        }
 
+       ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
+                          IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
+                          IPU_CONF_IC_INPUT);
+       if (ret) {
+               unit = "vdi";
+               goto err_vdi;
+       }
+
+       ret = ipu_image_convert_init(ipu, dev);
+       if (ret) {
+               unit = "image_convert";
+               goto err_image_convert;
+       }
+
        ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
                          IPU_CONF_DI0_EN, ipu_clk);
        if (ret) {
@@ -893,6 +1038,10 @@ err_dc:
 err_di_1:
        ipu_di_exit(ipu, 0);
 err_di_0:
+       ipu_image_convert_exit(ipu);
+err_image_convert:
+       ipu_vdi_exit(ipu);
+err_vdi:
        ipu_ic_exit(ipu);
 err_ic:
        ipu_csi_exit(ipu, 1);
@@ -977,6 +1126,8 @@ static void ipu_submodules_exit(struct ipu_soc *ipu)
        ipu_dc_exit(ipu);
        ipu_di_exit(ipu, 1);
        ipu_di_exit(ipu, 0);
+       ipu_image_convert_exit(ipu);
+       ipu_vdi_exit(ipu);
        ipu_ic_exit(ipu);
        ipu_csi_exit(ipu, 1);
        ipu_csi_exit(ipu, 0);
@@ -1213,8 +1364,6 @@ EXPORT_SYMBOL_GPL(ipu_dump);
 
 static int ipu_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id =
-                       of_match_device(imx_ipu_dt_ids, &pdev->dev);
        struct device_node *np = pdev->dev.of_node;
        struct ipu_soc *ipu;
        struct resource *res;
@@ -1222,7 +1371,9 @@ static int ipu_probe(struct platform_device *pdev)
        int i, ret, irq_sync, irq_err;
        const struct ipu_devtype *devtype;
 
-       devtype = of_id->data;
+       devtype = of_device_get_match_data(&pdev->dev);
+       if (!devtype)
+               return -EINVAL;
 
        irq_sync = platform_get_irq(pdev, 0);
        irq_err = platform_get_irq(pdev, 1);
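The new ipu_idmac_link()/ipu_idmac_unlink() helpers above route one IDMAC channel's output into another purely through the FS_PROC_FLOW registers. A minimal usage sketch, assuming the channel pointers were obtained via ipu_idmac_get() as in get_ipu_resources() further down; ipu_fsu_link() returns -EINVAL when fsu_link_info[] has no route between the two channels, and example_link_rotator() is a hypothetical caller:

/* Link the IC output channel to the rotator input for the duration
 * of a conversion, then undo the link on teardown. */
static int example_link_rotator(struct ipuv3_channel *out_ch,
				struct ipuv3_channel *rot_in_ch)
{
	int ret = ipu_idmac_link(out_ch, rot_in_ch);

	if (ret)
		return ret;

	/* ... enable the channels and run the IDMAC transfers ... */

	return ipu_idmac_unlink(out_ch, rot_in_ch);
}

This mirrors the convert_start()/convert_stop() pairing in the new ipu-image-convert.c below, which links the IC output to the rotator input only for rotating conversions.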
index 42705bb..a40f211 100644
@@ -123,20 +123,6 @@ int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
 
-static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-       while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
-               if (time_after(jiffies, timeout)) {
-                       dev_warn(priv->dev,
-                                "Timeout waiting for DMFC FIFOs to clear\n");
-                       break;
-               }
-               cpu_relax();
-       }
-}
-
 void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 {
        struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -145,10 +131,8 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 
        priv->use_count--;
 
-       if (!priv->use_count) {
-               ipu_dmfc_wait_fifos(priv);
+       if (!priv->use_count)
                ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
-       }
 
        if (priv->use_count < 0)
                priv->use_count = 0;
index 1a37afc..321eb98 100644
@@ -619,7 +619,7 @@ int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
        ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
        ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
 
-       if (rot >= IPU_ROTATE_90_RIGHT)
+       if (ipu_rot_mode_is_irt(rot))
                ic->rotation = true;
 
 unlock:
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
new file mode 100644
index 0000000..2ba7d43
--- /dev/null
@@ -0,0 +1,1709 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <video/imx-ipu-image-convert.h>
+#include "ipu-prv.h"
+
+/*
+ * The IC Resizer has a restriction that the output frame from the
+ * resizer must be 1024 or less in both width (pixels) and height
+ * (lines).
+ *
+ * The image converter attempts to split up a conversion when
+ * the desired output (converted) frame resolution exceeds the
+ * IC resizer limit of 1024 in either dimension.
+ *
+ * If either dimension of the output frame exceeds the limit, the
+ * dimension is split into 1, 2, or 4 equal stripes, for a maximum
+ * of 4*4 or 16 tiles. A conversion is then carried out for each
+ * tile (but taking care to pass the full frame stride length to
+ * the DMA channel's parameter memory!). IDMA double-buffering is used
+ * to convert each tile back-to-back when possible (see the note below,
+ * where the double_buffering boolean is set).
+ *
+ * Note that the input frame must be split up into the same number
+ * of tiles as the output frame.
+ *
+ * FIXME: at this point there is no attempt to deal with visible seams
+ * at the tile boundaries when upscaling. The seams are caused by a reset
+ * of the bilinear upscale interpolation when starting a new tile. The
+ * seams are barely visible for small upscale factors, but become
+ * increasingly visible as the upscale factor gets larger, since more
+ * interpolated pixels get thrown out at the tile boundaries. A possible
+ * fix might be to overlap tiles of different sizes, but this must be done
+ * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
+ * alignment restrictions of each tile.
+ */
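+
+/*
+ * Example: a 1920x1080 output frame exceeds 1024 in both dimensions,
+ * so num_stripes() picks 2 columns and 2 rows, giving four 960x540
+ * tiles that each fit within the resizer limit.
+ */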
+
+#define MAX_STRIPES_W    4
+#define MAX_STRIPES_H    4
+#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
+
+#define MIN_W     16
+#define MIN_H     8
+#define MAX_W     4096
+#define MAX_H     4096
+
+enum ipu_image_convert_type {
+       IMAGE_CONVERT_IN = 0,
+       IMAGE_CONVERT_OUT,
+};
+
+struct ipu_image_convert_dma_buf {
+       void          *virt;
+       dma_addr_t    phys;
+       unsigned long len;
+};
+
+struct ipu_image_convert_dma_chan {
+       int in;
+       int out;
+       int rot_in;
+       int rot_out;
+       int vdi_in_p;
+       int vdi_in;
+       int vdi_in_n;
+};
+
+/* dimensions of one tile */
+struct ipu_image_tile {
+       u32 width;
+       u32 height;
+       /* size and strides are in bytes */
+       u32 size;
+       u32 stride;
+       u32 rot_stride;
+       /* start Y or packed offset of this tile */
+       u32 offset;
+       /* offset from start to tile in U plane, for planar formats */
+       u32 u_off;
+       /* offset from start to tile in V plane, for planar formats */
+       u32 v_off;
+};
+
+struct ipu_image_convert_image {
+       struct ipu_image base;
+       enum ipu_image_convert_type type;
+
+       const struct ipu_image_pixfmt *fmt;
+       unsigned int stride;
+
+       /* # of rows (horizontal stripes) if dest height is > 1024 */
+       unsigned int num_rows;
+       /* # of columns (vertical stripes) if dest width is > 1024 */
+       unsigned int num_cols;
+
+       struct ipu_image_tile tile[MAX_TILES];
+};
+
+struct ipu_image_pixfmt {
+       u32     fourcc;        /* V4L2 fourcc */
+       int     bpp;           /* total bpp */
+       int     uv_width_dec;  /* decimation in width for U/V planes */
+       int     uv_height_dec; /* decimation in height for U/V planes */
+       bool    planar;        /* planar format */
+       bool    uv_swapped;    /* U and V planes are swapped */
+       bool    uv_packed;     /* partial planar (U and V in same plane) */
+};
+
+struct ipu_image_convert_ctx;
+struct ipu_image_convert_chan;
+struct ipu_image_convert_priv;
+
+struct ipu_image_convert_ctx {
+       struct ipu_image_convert_chan *chan;
+
+       ipu_image_convert_cb_t complete;
+       void *complete_context;
+
+       /* Source/destination image data and rotation mode */
+       struct ipu_image_convert_image in;
+       struct ipu_image_convert_image out;
+       enum ipu_rotate_mode rot_mode;
+
+       /* intermediate buffer for rotation */
+       struct ipu_image_convert_dma_buf rot_intermediate[2];
+
+       /* current buffer number for double buffering */
+       int cur_buf_num;
+
+       bool aborting;
+       struct completion aborted;
+
+       /* can we use double-buffering for this conversion operation? */
+       bool double_buffering;
+       /* num_rows * num_cols */
+       unsigned int num_tiles;
+       /* next tile to process */
+       unsigned int next_tile;
+       /* where to place converted tile in dest image */
+       unsigned int out_tile_map[MAX_TILES];
+
+       struct list_head list;
+};
+
+struct ipu_image_convert_chan {
+       struct ipu_image_convert_priv *priv;
+
+       enum ipu_ic_task ic_task;
+       const struct ipu_image_convert_dma_chan *dma_ch;
+
+       struct ipu_ic *ic;
+       struct ipuv3_channel *in_chan;
+       struct ipuv3_channel *out_chan;
+       struct ipuv3_channel *rotation_in_chan;
+       struct ipuv3_channel *rotation_out_chan;
+
+       /* the IPU end-of-frame irqs */
+       int out_eof_irq;
+       int rot_out_eof_irq;
+
+       spinlock_t irqlock;
+
+       /* list of convert contexts */
+       struct list_head ctx_list;
+       /* queue of conversion runs */
+       struct list_head pending_q;
+       /* queue of completed runs */
+       struct list_head done_q;
+
+       /* the current conversion run */
+       struct ipu_image_convert_run *current_run;
+};
+
+struct ipu_image_convert_priv {
+       struct ipu_image_convert_chan chan[IC_NUM_TASKS];
+       struct ipu_soc *ipu;
+};
+
+static const struct ipu_image_convert_dma_chan
+image_convert_dma_chan[IC_NUM_TASKS] = {
+       [IC_TASK_VIEWFINDER] = {
+               .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
+               .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+               .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
+               .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
+               .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
+               .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
+               .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
+       },
+       [IC_TASK_POST_PROCESSOR] = {
+               .in = IPUV3_CHANNEL_MEM_IC_PP,
+               .out = IPUV3_CHANNEL_IC_PP_MEM,
+               .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
+               .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
+       },
+};
+
+static const struct ipu_image_pixfmt image_convert_formats[] = {
+       {
+               .fourcc = V4L2_PIX_FMT_RGB565,
+               .bpp    = 16,
+       }, {
+               .fourcc = V4L2_PIX_FMT_RGB24,
+               .bpp    = 24,
+       }, {
+               .fourcc = V4L2_PIX_FMT_BGR24,
+               .bpp    = 24,
+       }, {
+               .fourcc = V4L2_PIX_FMT_RGB32,
+               .bpp    = 32,
+       }, {
+               .fourcc = V4L2_PIX_FMT_BGR32,
+               .bpp    = 32,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YUYV,
+               .bpp    = 16,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+       }, {
+               .fourcc = V4L2_PIX_FMT_UYVY,
+               .bpp    = 16,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YUV420,
+               .bpp    = 12,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 2,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YVU420,
+               .bpp    = 12,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 2,
+               .uv_swapped = true,
+       }, {
+               .fourcc = V4L2_PIX_FMT_NV12,
+               .bpp    = 12,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 2,
+               .uv_packed = true,
+       }, {
+               .fourcc = V4L2_PIX_FMT_YUV422P,
+               .bpp    = 16,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+       }, {
+               .fourcc = V4L2_PIX_FMT_NV16,
+               .bpp    = 16,
+               .planar = true,
+               .uv_width_dec = 2,
+               .uv_height_dec = 1,
+               .uv_packed = true,
+       },
+};
+
+static const struct ipu_image_pixfmt *get_format(u32 fourcc)
+{
+       const struct ipu_image_pixfmt *ret = NULL;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
+               if (image_convert_formats[i].fourcc == fourcc) {
+                       ret = &image_convert_formats[i];
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static void dump_format(struct ipu_image_convert_ctx *ctx,
+                       struct ipu_image_convert_image *ic_image)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+
+       dev_dbg(priv->ipu->dev,
+               "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+               chan->ic_task, ctx,
+               ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
+               ic_image->base.pix.width, ic_image->base.pix.height,
+               ic_image->num_cols, ic_image->num_rows,
+               ic_image->tile[0].width, ic_image->tile[0].height,
+               ic_image->fmt->fourcc & 0xff,
+               (ic_image->fmt->fourcc >> 8) & 0xff,
+               (ic_image->fmt->fourcc >> 16) & 0xff,
+               (ic_image->fmt->fourcc >> 24) & 0xff);
+}
+
+int ipu_image_convert_enum_format(int index, u32 *fourcc)
+{
+       const struct ipu_image_pixfmt *fmt;
+
+       if (index >= (int)ARRAY_SIZE(image_convert_formats))
+               return -EINVAL;
+
+       /* Format found */
+       fmt = &image_convert_formats[index];
+       *fourcc = fmt->fourcc;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
+
+static void free_dma_buf(struct ipu_image_convert_priv *priv,
+                        struct ipu_image_convert_dma_buf *buf)
+{
+       if (buf->virt)
+               dma_free_coherent(priv->ipu->dev,
+                                 buf->len, buf->virt, buf->phys);
+       buf->virt = NULL;
+       buf->phys = 0;
+}
+
+static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
+                        struct ipu_image_convert_dma_buf *buf,
+                        int size)
+{
+       buf->len = PAGE_ALIGN(size);
+       buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
+                                      GFP_DMA | GFP_KERNEL);
+       if (!buf->virt) {
+               dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static inline int num_stripes(int dim)
+{
+       if (dim <= 1024)
+               return 1;
+       else if (dim <= 2048)
+               return 2;
+       else
+               return 4;
+}
+
+static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
+                                struct ipu_image_convert_image *image)
+{
+       int i;
+
+       for (i = 0; i < ctx->num_tiles; i++) {
+               struct ipu_image_tile *tile = &image->tile[i];
+
+               tile->height = image->base.pix.height / image->num_rows;
+               tile->width = image->base.pix.width / image->num_cols;
+               tile->size = ((tile->height * image->fmt->bpp) >> 3) *
+                       tile->width;
+
+               if (image->fmt->planar) {
+                       tile->stride = tile->width;
+                       tile->rot_stride = tile->height;
+               } else {
+                       tile->stride =
+                               (image->fmt->bpp * tile->width) >> 3;
+                       tile->rot_stride =
+                               (image->fmt->bpp * tile->height) >> 3;
+               }
+       }
+}
+
+/*
+ * Use the rotation transformation to find the tile coordinates
+ * (row, col) of a tile in the destination frame that corresponds
+ * to the given tile coordinates of a source frame. The destination
+ * coordinate is then converted to a tile index.
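+ *
+ * For example, with 2x2 source and destination grids and a 90 degree
+ * clockwise rotation (IPU_ROT_BIT_90, no flips), source tile [0,0]
+ * maps to destination tile [0,1] (index 1): the input's top-left tile
+ * becomes the output's top-right tile.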
+ */
+static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
+                               int src_row, int src_col)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       struct ipu_image_convert_image *d_image = &ctx->out;
+       int dst_row, dst_col;
+
+       /* with no rotation it's a 1:1 mapping */
+       if (ctx->rot_mode == IPU_ROTATE_NONE)
+               return src_row * s_image->num_cols + src_col;
+
+       /*
+        * before doing the transform, first we have to translate
+        * source row,col for an origin in the center of s_image
+        */
+       src_row = src_row * 2 - (s_image->num_rows - 1);
+       src_col = src_col * 2 - (s_image->num_cols - 1);
+
+       /* do the rotation transform */
+       if (ctx->rot_mode & IPU_ROT_BIT_90) {
+               dst_col = -src_row;
+               dst_row = src_col;
+       } else {
+               dst_col = src_col;
+               dst_row = src_row;
+       }
+
+       /* apply flip */
+       if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+               dst_col = -dst_col;
+       if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
+               dst_row = -dst_row;
+
+       dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
+               chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
+
+       /*
+        * finally translate dest row,col using an origin in upper
+        * left of d_image
+        */
+       dst_row += d_image->num_rows - 1;
+       dst_col += d_image->num_cols - 1;
+       dst_row /= 2;
+       dst_col /= 2;
+
+       return dst_row * d_image->num_cols + dst_col;
+}
+
+/*
+ * Fill the out_tile_map[] with transformed destination tile indices.
+ */
+static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       unsigned int row, col, tile = 0;
+
+       for (row = 0; row < s_image->num_rows; row++) {
+               for (col = 0; col < s_image->num_cols; col++) {
+                       ctx->out_tile_map[tile] =
+                               transform_tile_index(ctx, row, col);
+                       tile++;
+               }
+       }
+}
+
+static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+                                    struct ipu_image_convert_image *image)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       const struct ipu_image_pixfmt *fmt = image->fmt;
+       unsigned int row, col, tile = 0;
+       u32 H, w, h, y_stride, uv_stride;
+       u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
+       u32 y_row_off, y_col_off, y_off;
+       u32 y_size, uv_size;
+
+       /* setup some convenience vars */
+       H = image->base.pix.height;
+
+       y_stride = image->stride;
+       uv_stride = y_stride / fmt->uv_width_dec;
+       if (fmt->uv_packed)
+               uv_stride *= 2;
+
+       y_size = H * y_stride;
+       uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
+
+       for (row = 0; row < image->num_rows; row++) {
+               w = image->tile[tile].width;
+               h = image->tile[tile].height;
+               y_row_off = row * h * y_stride;
+               uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
+
+               for (col = 0; col < image->num_cols; col++) {
+                       y_col_off = col * w;
+                       uv_col_off = y_col_off / fmt->uv_width_dec;
+                       if (fmt->uv_packed)
+                               uv_col_off *= 2;
+
+                       y_off = y_row_off + y_col_off;
+                       uv_off = uv_row_off + uv_col_off;
+
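+                       /*
+                        * u_off/v_off are programmed relative to the tile's
+                        * own Y offset (the channel buffer starts at y_off),
+                        * hence (y_size + uv_off) - y_off to reach into the
+                        * U plane, which begins y_size bytes into the frame.
+                        */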
+                       u_off = y_size - y_off + uv_off;
+                       v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
+                       if (fmt->uv_swapped) {
+                               tmp = u_off;
+                               u_off = v_off;
+                               v_off = tmp;
+                       }
+
+                       image->tile[tile].offset = y_off;
+                       image->tile[tile].u_off = u_off;
+                       image->tile[tile++].v_off = v_off;
+
+                       dev_dbg(priv->ipu->dev,
+                               "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
+                               chan->ic_task, ctx,
+                               image->type == IMAGE_CONVERT_IN ?
+                               "Input" : "Output", row, col,
+                               y_off, u_off, v_off);
+               }
+       }
+}
+
+static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+                                    struct ipu_image_convert_image *image)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       const struct ipu_image_pixfmt *fmt = image->fmt;
+       unsigned int row, col, tile = 0;
+       u32 w, h, bpp, stride;
+       u32 row_off, col_off;
+
+       /* setup some convenience vars */
+       stride = image->stride;
+       bpp = fmt->bpp;
+
+       for (row = 0; row < image->num_rows; row++) {
+               w = image->tile[tile].width;
+               h = image->tile[tile].height;
+               row_off = row * h * stride;
+
+               for (col = 0; col < image->num_cols; col++) {
+                       col_off = (col * w * bpp) >> 3;
+
+                       image->tile[tile].offset = row_off + col_off;
+                       image->tile[tile].u_off = 0;
+                       image->tile[tile++].v_off = 0;
+
+                       dev_dbg(priv->ipu->dev,
+                               "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
+                               chan->ic_task, ctx,
+                               image->type == IMAGE_CONVERT_IN ?
+                               "Input" : "Output", row, col,
+                               row_off + col_off);
+               }
+       }
+}
+
+static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+                             struct ipu_image_convert_image *image)
+{
+       if (image->fmt->planar)
+               calc_tile_offsets_planar(ctx, image);
+       else
+               calc_tile_offsets_packed(ctx, image);
+}
+
+/*
+ * return the number of runs in given queue (pending_q or done_q)
+ * for this context. hold irqlock when calling.
+ */
+static int get_run_count(struct ipu_image_convert_ctx *ctx,
+                        struct list_head *q)
+{
+       struct ipu_image_convert_run *run;
+       int count = 0;
+
+       lockdep_assert_held(&ctx->chan->irqlock);
+
+       list_for_each_entry(run, q, list) {
+               if (run->ctx == ctx)
+                       count++;
+       }
+
+       return count;
+}
+
+static void convert_stop(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
+               __func__, chan->ic_task, ctx, run);
+
+       /* disable IC tasks and the channels */
+       ipu_ic_task_disable(chan->ic);
+       ipu_idmac_disable_channel(chan->in_chan);
+       ipu_idmac_disable_channel(chan->out_chan);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ipu_idmac_disable_channel(chan->rotation_in_chan);
+               ipu_idmac_disable_channel(chan->rotation_out_chan);
+               ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
+       }
+
+       ipu_ic_disable(chan->ic);
+}
+
+static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
+                              struct ipuv3_channel *channel,
+                              struct ipu_image_convert_image *image,
+                              enum ipu_rotate_mode rot_mode,
+                              bool rot_swap_width_height)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       unsigned int burst_size;
+       u32 width, height, stride;
+       dma_addr_t addr0, addr1 = 0;
+       struct ipu_image tile_image;
+       unsigned int tile_idx[2];
+
+       if (image->type == IMAGE_CONVERT_OUT) {
+               tile_idx[0] = ctx->out_tile_map[0];
+               tile_idx[1] = ctx->out_tile_map[1];
+       } else {
+               tile_idx[0] = 0;
+               tile_idx[1] = 1;
+       }
+
+       if (rot_swap_width_height) {
+               width = image->tile[0].height;
+               height = image->tile[0].width;
+               stride = image->tile[0].rot_stride;
+               addr0 = ctx->rot_intermediate[0].phys;
+               if (ctx->double_buffering)
+                       addr1 = ctx->rot_intermediate[1].phys;
+       } else {
+               width = image->tile[0].width;
+               height = image->tile[0].height;
+               stride = image->stride;
+               addr0 = image->base.phys0 +
+                       image->tile[tile_idx[0]].offset;
+               if (ctx->double_buffering)
+                       addr1 = image->base.phys0 +
+                               image->tile[tile_idx[1]].offset;
+       }
+
+       ipu_cpmem_zero(channel);
+
+       memset(&tile_image, 0, sizeof(tile_image));
+       tile_image.pix.width = tile_image.rect.width = width;
+       tile_image.pix.height = tile_image.rect.height = height;
+       tile_image.pix.bytesperline = stride;
+       tile_image.pix.pixelformat = image->fmt->fourcc;
+       tile_image.phys0 = addr0;
+       tile_image.phys1 = addr1;
+       ipu_cpmem_set_image(channel, &tile_image);
+
+       if (image->fmt->planar && !rot_swap_width_height)
+               ipu_cpmem_set_uv_offset(channel,
+                                       image->tile[tile_idx[0]].u_off,
+                                       image->tile[tile_idx[0]].v_off);
+
+       if (rot_mode)
+               ipu_cpmem_set_rotation(channel, rot_mode);
+
+       if (channel == chan->rotation_in_chan ||
+           channel == chan->rotation_out_chan) {
+               burst_size = 8;
+               ipu_cpmem_set_block_mode(channel);
+       } else
+               burst_size = (width % 16) ? 8 : 16;
+
+       ipu_cpmem_set_burstsize(channel, burst_size);
+
+       ipu_ic_task_idma_init(chan->ic, channel, width, height,
+                             burst_size, rot_mode);
+
+       ipu_cpmem_set_axi_id(channel, 1);
+
+       ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
+}
+
+static int convert_start(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       struct ipu_image_convert_image *d_image = &ctx->out;
+       enum ipu_color_space src_cs, dest_cs;
+       unsigned int dest_width, dest_height;
+       int ret;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
+               __func__, chan->ic_task, ctx, run);
+
+       src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
+       dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* swap width/height for resizer */
+               dest_width = d_image->tile[0].height;
+               dest_height = d_image->tile[0].width;
+       } else {
+               dest_width = d_image->tile[0].width;
+               dest_height = d_image->tile[0].height;
+       }
+
+       /* setup the IC resizer and CSC */
+       ret = ipu_ic_task_init(chan->ic,
+                              s_image->tile[0].width,
+                              s_image->tile[0].height,
+                              dest_width,
+                              dest_height,
+                              src_cs, dest_cs);
+       if (ret) {
+               dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
+               return ret;
+       }
+
+       /* init the source MEM-->IC PP IDMAC channel */
+       init_idmac_channel(ctx, chan->in_chan, s_image,
+                          IPU_ROTATE_NONE, false);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* init the IC PP-->MEM IDMAC channel */
+               init_idmac_channel(ctx, chan->out_chan, d_image,
+                                  IPU_ROTATE_NONE, true);
+
+               /* init the MEM-->IC PP ROT IDMAC channel */
+               init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
+                                  ctx->rot_mode, true);
+
+               /* init the destination IC PP ROT-->MEM IDMAC channel */
+               init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
+                                  IPU_ROTATE_NONE, false);
+
+               /* now link IC PP-->MEM to MEM-->IC PP ROT */
+               ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
+       } else {
+               /* init the destination IC PP-->MEM IDMAC channel */
+               init_idmac_channel(ctx, chan->out_chan, d_image,
+                                  ctx->rot_mode, false);
+       }
+
+       /* enable the IC */
+       ipu_ic_enable(chan->ic);
+
+       /* set buffers ready */
+       ipu_idmac_select_buffer(chan->in_chan, 0);
+       ipu_idmac_select_buffer(chan->out_chan, 0);
+       if (ipu_rot_mode_is_irt(ctx->rot_mode))
+               ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
+       if (ctx->double_buffering) {
+               ipu_idmac_select_buffer(chan->in_chan, 1);
+               ipu_idmac_select_buffer(chan->out_chan, 1);
+               if (ipu_rot_mode_is_irt(ctx->rot_mode))
+                       ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
+       }
+
+       /* enable the channels! */
+       ipu_idmac_enable_channel(chan->in_chan);
+       ipu_idmac_enable_channel(chan->out_chan);
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ipu_idmac_enable_channel(chan->rotation_in_chan);
+               ipu_idmac_enable_channel(chan->rotation_out_chan);
+       }
+
+       ipu_ic_task_enable(chan->ic);
+
+       ipu_cpmem_dump(chan->in_chan);
+       ipu_cpmem_dump(chan->out_chan);
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ipu_cpmem_dump(chan->rotation_in_chan);
+               ipu_cpmem_dump(chan->rotation_out_chan);
+       }
+
+       ipu_dump(priv->ipu);
+
+       return 0;
+}
+
+/* hold irqlock when calling */
+static int do_run(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+
+       lockdep_assert_held(&chan->irqlock);
+
+       ctx->in.base.phys0 = run->in_phys;
+       ctx->out.base.phys0 = run->out_phys;
+
+       ctx->cur_buf_num = 0;
+       ctx->next_tile = 1;
+
+       /* remove run from pending_q and set as current */
+       list_del(&run->list);
+       chan->current_run = run;
+
+       return convert_start(run);
+}
+
+/* hold irqlock when calling */
+static void run_next(struct ipu_image_convert_chan *chan)
+{
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_run *run, *tmp;
+       int ret;
+
+       lockdep_assert_held(&chan->irqlock);
+
+       list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+               /* skip contexts that are aborting */
+               if (run->ctx->aborting) {
+                       dev_dbg(priv->ipu->dev,
+                               "%s: task %u: skipping aborting ctx %p run %p\n",
+                               __func__, chan->ic_task, run->ctx, run);
+                       continue;
+               }
+
+               ret = do_run(run);
+               if (!ret)
+                       break;
+
+               /*
+                * something went wrong with start, add the run
+                * to done q and continue to the next run in the
+                * pending q.
+                */
+               run->status = ret;
+               list_add_tail(&run->list, &chan->done_q);
+               chan->current_run = NULL;
+       }
+}
+
+static void empty_done_q(struct ipu_image_convert_chan *chan)
+{
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       while (!list_empty(&chan->done_q)) {
+               run = list_entry(chan->done_q.next,
+                                struct ipu_image_convert_run,
+                                list);
+
+               list_del(&run->list);
+
+               dev_dbg(priv->ipu->dev,
+                       "%s: task %u: completing ctx %p run %p with %d\n",
+                       __func__, chan->ic_task, run->ctx, run, run->status);
+
+               /* call the completion callback and free the run */
+               spin_unlock_irqrestore(&chan->irqlock, flags);
+               run->ctx->complete(run, run->ctx->complete_context);
+               spin_lock_irqsave(&chan->irqlock, flags);
+       }
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+}
+
+/*
+ * the bottom half thread clears out the done_q, calling the
+ * completion handler for each.
+ */
+static irqreturn_t do_bh(int irq, void *dev_id)
+{
+       struct ipu_image_convert_chan *chan = dev_id;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_ctx *ctx;
+       unsigned long flags;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
+               chan->ic_task);
+
+       empty_done_q(chan);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /*
+        * the done_q is cleared out, signal any contexts
+        * that are aborting that abort can complete.
+        */
+       list_for_each_entry(ctx, &chan->ctx_list, list) {
+               if (ctx->aborting) {
+                       dev_dbg(priv->ipu->dev,
+                               "%s: task %u: signaling abort for ctx %p\n",
+                               __func__, chan->ic_task, ctx);
+                       complete(&ctx->aborted);
+               }
+       }
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
+               chan->ic_task);
+
+       return IRQ_HANDLED;
+}
+
+/* hold irqlock when calling */
+static irqreturn_t do_irq(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_ctx *ctx = run->ctx;
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_tile *src_tile, *dst_tile;
+       struct ipu_image_convert_image *s_image = &ctx->in;
+       struct ipu_image_convert_image *d_image = &ctx->out;
+       struct ipuv3_channel *outch;
+       unsigned int dst_idx;
+
+       lockdep_assert_held(&chan->irqlock);
+
+       outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
+               chan->rotation_out_chan : chan->out_chan;
+
+       /*
+        * It is difficult to stop the channel DMA before the channels
+        * enter the paused state. Without double-buffering the channels
+        * are always in a paused state when the EOF irq occurs, so it
+        * is safe to stop the channels now. For double-buffering we
+        * just ignore the abort until the operation completes, when it
+        * is safe to shut down.
+        */
+       if (ctx->aborting && !ctx->double_buffering) {
+               convert_stop(run);
+               run->status = -EIO;
+               goto done;
+       }
+
+       if (ctx->next_tile == ctx->num_tiles) {
+               /*
+                * the conversion is complete
+                */
+               convert_stop(run);
+               run->status = 0;
+               goto done;
+       }
+
+       /*
+        * not done, place the next tile buffers.
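+        * Without double-buffering the channels are paused at EOF, so
+        * buffer 0 is simply reprogrammed for next_tile. With double-
+        * buffering, tile (next_tile + 1) is loaded into the buffer that
+        * just completed while the hardware is already converting
+        * next_tile.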
+        */
+       if (!ctx->double_buffering) {
+
+               src_tile = &s_image->tile[ctx->next_tile];
+               dst_idx = ctx->out_tile_map[ctx->next_tile];
+               dst_tile = &d_image->tile[dst_idx];
+
+               ipu_cpmem_set_buffer(chan->in_chan, 0,
+                                    s_image->base.phys0 + src_tile->offset);
+               ipu_cpmem_set_buffer(outch, 0,
+                                    d_image->base.phys0 + dst_tile->offset);
+               if (s_image->fmt->planar)
+                       ipu_cpmem_set_uv_offset(chan->in_chan,
+                                               src_tile->u_off,
+                                               src_tile->v_off);
+               if (d_image->fmt->planar)
+                       ipu_cpmem_set_uv_offset(outch,
+                                               dst_tile->u_off,
+                                               dst_tile->v_off);
+
+               ipu_idmac_select_buffer(chan->in_chan, 0);
+               ipu_idmac_select_buffer(outch, 0);
+
+       } else if (ctx->next_tile < ctx->num_tiles - 1) {
+
+               src_tile = &s_image->tile[ctx->next_tile + 1];
+               dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
+               dst_tile = &d_image->tile[dst_idx];
+
+               ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
+                                    s_image->base.phys0 + src_tile->offset);
+               ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
+                                    d_image->base.phys0 + dst_tile->offset);
+
+               ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
+               ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
+
+               ctx->cur_buf_num ^= 1;
+       }
+
+       ctx->next_tile++;
+       return IRQ_HANDLED;
+done:
+       list_add_tail(&run->list, &chan->done_q);
+       chan->current_run = NULL;
+       run_next(chan);
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t norotate_irq(int irq, void *data)
+{
+       struct ipu_image_convert_chan *chan = data;
+       struct ipu_image_convert_ctx *ctx;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+       irqreturn_t ret;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /* get current run and its context */
+       run = chan->current_run;
+       if (!run) {
+               ret = IRQ_NONE;
+               goto out;
+       }
+
+       ctx = run->ctx;
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* this is a rotation operation, just ignore */
+               spin_unlock_irqrestore(&chan->irqlock, flags);
+               return IRQ_HANDLED;
+       }
+
+       ret = do_irq(run);
+out:
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+       return ret;
+}
+
+static irqreturn_t rotate_irq(int irq, void *data)
+{
+       struct ipu_image_convert_chan *chan = data;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_ctx *ctx;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+       irqreturn_t ret;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /* get current run and its context */
+       run = chan->current_run;
+       if (!run) {
+               ret = IRQ_NONE;
+               goto out;
+       }
+
+       ctx = run->ctx;
+
+       if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               /* this was NOT a rotation operation, shouldn't happen */
+               dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
+               spin_unlock_irqrestore(&chan->irqlock, flags);
+               return IRQ_HANDLED;
+       }
+
+       ret = do_irq(run);
+out:
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+       return ret;
+}
+
+/*
+ * try to force the completion of runs for this ctx. Called when
+ * abort wait times out in ipu_image_convert_abort().
+ */
+static void force_abort(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_run *run;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       run = chan->current_run;
+       if (run && run->ctx == ctx) {
+               convert_stop(run);
+               run->status = -EIO;
+               list_add_tail(&run->list, &chan->done_q);
+               chan->current_run = NULL;
+               run_next(chan);
+       }
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       empty_done_q(chan);
+}
+
+static void release_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+       if (chan->out_eof_irq >= 0)
+               free_irq(chan->out_eof_irq, chan);
+       if (chan->rot_out_eof_irq >= 0)
+               free_irq(chan->rot_out_eof_irq, chan);
+
+       if (!IS_ERR_OR_NULL(chan->in_chan))
+               ipu_idmac_put(chan->in_chan);
+       if (!IS_ERR_OR_NULL(chan->out_chan))
+               ipu_idmac_put(chan->out_chan);
+       if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
+               ipu_idmac_put(chan->rotation_in_chan);
+       if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
+               ipu_idmac_put(chan->rotation_out_chan);
+       if (!IS_ERR_OR_NULL(chan->ic))
+               ipu_ic_put(chan->ic);
+
+       chan->in_chan = chan->out_chan = chan->rotation_in_chan =
+               chan->rotation_out_chan = NULL;
+       chan->out_eof_irq = chan->rot_out_eof_irq = -1;
+}
+
+static int get_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+       const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       int ret;
+
+       /* get IC */
+       chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
+       if (IS_ERR(chan->ic)) {
+               dev_err(priv->ipu->dev, "could not acquire IC\n");
+               ret = PTR_ERR(chan->ic);
+               goto err;
+       }
+
+       /* get IDMAC channels */
+       chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
+       chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
+       if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
+               dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
+               ret = -EBUSY;
+               goto err;
+       }
+
+       chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
+       chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
+       if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
+               dev_err(priv->ipu->dev,
+                       "could not acquire idmac rotation channels\n");
+               ret = -EBUSY;
+               goto err;
+       }
+
+       /* acquire the EOF interrupts */
+       chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+                                                 chan->out_chan,
+                                                 IPU_IRQ_EOF);
+
+       ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
+                                  0, "ipu-ic", chan);
+       if (ret < 0) {
+               dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+                        chan->out_eof_irq);
+               chan->out_eof_irq = -1;
+               goto err;
+       }
+
+       chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+                                                    chan->rotation_out_chan,
+                                                    IPU_IRQ_EOF);
+
+       ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
+                                  0, "ipu-ic", chan);
+       if (ret < 0) {
+               dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+                       chan->rot_out_eof_irq);
+               chan->rot_out_eof_irq = -1;
+               goto err;
+       }
+
+       return 0;
+err:
+       release_ipu_resources(chan);
+       return ret;
+}
+
+static int fill_image(struct ipu_image_convert_ctx *ctx,
+                     struct ipu_image_convert_image *ic_image,
+                     struct ipu_image *image,
+                     enum ipu_image_convert_type type)
+{
+       struct ipu_image_convert_priv *priv = ctx->chan->priv;
+
+       ic_image->base = *image;
+       ic_image->type = type;
+
+       ic_image->fmt = get_format(image->pix.pixelformat);
+       if (!ic_image->fmt) {
+               dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
+                       type == IMAGE_CONVERT_OUT ? "Output" : "Input");
+               return -EINVAL;
+       }
+
+       if (ic_image->fmt->planar)
+               ic_image->stride = ic_image->base.pix.width;
+       else
+               ic_image->stride  = ic_image->base.pix.bytesperline;
+
+       calc_tile_dimensions(ctx, ic_image);
+       calc_tile_offsets(ctx, ic_image);
+
+       return 0;
+}
+
+/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+                               unsigned int max, unsigned int align)
+{
+       /* Bits that must be zero to be aligned */
+       unsigned int mask = ~((1 << align) - 1);
+
+       /* Clamp to aligned min and max */
+       x = clamp(x, (min + ~mask) & mask, max & mask);
+
+       /* Round to nearest aligned value */
+       if (align)
+               x = (x + (1 << (align - 1))) & mask;
+
+       return x;
+}
+
+/*
+ * We have to adjust the tile width such that the tile physaddrs and
+ * U and V plane offsets are multiples of 8 bytes as required by
+ * the IPU DMA Controller. For the planar formats, this corresponds
+ * to a pixel alignment of 16 (but use a more formal equation since
+ * the variables are available). For all the packed formats, 8 is
+ * good enough.
+ */
+static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
+{
+       return fmt->planar ? 8 * fmt->uv_width_dec : 8;
+}
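To make the arithmetic above concrete (assuming uv_width_dec is 2 for the
horizontally subsampled planar formats, matching their 4:2:0/4:2:2 chroma
decimation):

    tile_width_align(planar, uv_width_dec = 2)  =  8 * 2 = 16 pixels
    tile_width_align(packed)                    =  8 pixels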
+
+/*
+ * For tile height alignment, we have to ensure that the output tile
+ * heights are multiples of 8 lines if the IRT is required by the
+ * given rotation mode (the IRT performs rotations on 8x8 blocks
+ * at a time). If the IRT is not used, or for input image tiles,
+ * 2 lines are good enough.
+ */
+static inline u32 tile_height_align(enum ipu_image_convert_type type,
+                                   enum ipu_rotate_mode rot_mode)
+{
+       return (type == IMAGE_CONVERT_OUT &&
+               ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
+}
+
+/* Adjusts input/output images to IPU restrictions */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+                             enum ipu_rotate_mode rot_mode)
+{
+       const struct ipu_image_pixfmt *infmt, *outfmt;
+       unsigned int num_in_rows, num_in_cols;
+       unsigned int num_out_rows, num_out_cols;
+       u32 w_align, h_align;
+
+       infmt = get_format(in->pix.pixelformat);
+       outfmt = get_format(out->pix.pixelformat);
+
+       /* set some default pixel formats if needed */
+       if (!infmt) {
+               in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+               infmt = get_format(V4L2_PIX_FMT_RGB24);
+       }
+       if (!outfmt) {
+               out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+               outfmt = get_format(V4L2_PIX_FMT_RGB24);
+       }
+
+       /* image converter does not handle fields */
+       in->pix.field = out->pix.field = V4L2_FIELD_NONE;
+
+       /* resizer cannot downsize more than 4:1 */
+       if (ipu_rot_mode_is_irt(rot_mode)) {
+               out->pix.height = max_t(__u32, out->pix.height,
+                                       in->pix.width / 4);
+               out->pix.width = max_t(__u32, out->pix.width,
+                                      in->pix.height / 4);
+       } else {
+               out->pix.width = max_t(__u32, out->pix.width,
+                                      in->pix.width / 4);
+               out->pix.height = max_t(__u32, out->pix.height,
+                                       in->pix.height / 4);
+       }
+
+       /* get tiling rows/cols from output format */
+       num_out_rows = num_stripes(out->pix.height);
+       num_out_cols = num_stripes(out->pix.width);
+       if (ipu_rot_mode_is_irt(rot_mode)) {
+               num_in_rows = num_out_cols;
+               num_in_cols = num_out_rows;
+       } else {
+               num_in_rows = num_out_rows;
+               num_in_cols = num_out_cols;
+       }
+
+       /* align input width/height */
+       w_align = ilog2(tile_width_align(infmt) * num_in_cols);
+       h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
+                       num_in_rows);
+       in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
+       in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
+
+       /* align output width/height */
+       w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
+       h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
+                       num_out_rows);
+       out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
+       out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
+
+       /* set input/output strides and image sizes */
+       in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
+       in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
+       out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
+       out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
+
+/*
+ * This is used by ipu_image_convert_prepare() to verify that the given
+ * input and output images are valid before starting the conversion.
+ * Clients can also call it directly before calling
+ * ipu_image_convert_prepare().
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+                            enum ipu_rotate_mode rot_mode)
+{
+       struct ipu_image testin, testout;
+
+       testin = *in;
+       testout = *out;
+
+       ipu_image_convert_adjust(&testin, &testout, rot_mode);
+
+       if (testin.pix.width != in->pix.width ||
+           testin.pix.height != in->pix.height ||
+           testout.pix.width != out->pix.width ||
+           testout.pix.height != out->pix.height)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
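A minimal caller-side sketch of the adjust/verify contract (the wrapper name
is hypothetical, IPU_ROTATE_NONE is assumed as the rotation mode, and
ipu_image_convert_adjust() is assumed idempotent so the second verify passes):

    static int example_check_images(struct ipu_image *in, struct ipu_image *out)
    {
            int ret;

            ret = ipu_image_convert_verify(in, out, IPU_ROTATE_NONE);
            if (ret) {
                    /* not directly convertible: clamp and align the
                     * requested sizes to IPU limits, then re-check */
                    ipu_image_convert_adjust(in, out, IPU_ROTATE_NONE);
                    ret = ipu_image_convert_verify(in, out, IPU_ROTATE_NONE);
            }
            return ret;
    }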
+
+/*
+ * Call ipu_image_convert_prepare() to prepare for the conversion of
+ * given images and rotation mode. Returns a new conversion context.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                         struct ipu_image *in, struct ipu_image *out,
+                         enum ipu_rotate_mode rot_mode,
+                         ipu_image_convert_cb_t complete,
+                         void *complete_context)
+{
+       struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
+       struct ipu_image_convert_image *s_image, *d_image;
+       struct ipu_image_convert_chan *chan;
+       struct ipu_image_convert_ctx *ctx;
+       unsigned long flags;
+       bool get_res;
+       int ret;
+
+       if (!in || !out || !complete ||
+           (ic_task != IC_TASK_VIEWFINDER &&
+            ic_task != IC_TASK_POST_PROCESSOR))
+               return ERR_PTR(-EINVAL);
+
+       /* verify the in/out images before continuing */
+       ret = ipu_image_convert_verify(in, out, rot_mode);
+       if (ret) {
+               dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
+                       __func__);
+               return ERR_PTR(ret);
+       }
+
+       chan = &priv->chan[ic_task];
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
+               chan->ic_task, ctx);
+
+       ctx->chan = chan;
+       init_completion(&ctx->aborted);
+
+       s_image = &ctx->in;
+       d_image = &ctx->out;
+
+       /* set tiling and rotation */
+       d_image->num_rows = num_stripes(out->pix.height);
+       d_image->num_cols = num_stripes(out->pix.width);
+       if (ipu_rot_mode_is_irt(rot_mode)) {
+               s_image->num_rows = d_image->num_cols;
+               s_image->num_cols = d_image->num_rows;
+       } else {
+               s_image->num_rows = d_image->num_rows;
+               s_image->num_cols = d_image->num_cols;
+       }
+
+       ctx->num_tiles = d_image->num_cols * d_image->num_rows;
+       ctx->rot_mode = rot_mode;
+
+       ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
+       if (ret)
+               goto out_free;
+       ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
+       if (ret)
+               goto out_free;
+
+       calc_out_tile_map(ctx);
+
+       dump_format(ctx, s_image);
+       dump_format(ctx, d_image);
+
+       ctx->complete = complete;
+       ctx->complete_context = complete_context;
+
+       /*
+        * Can we use double-buffering for this operation? If there is
+        * only one tile (the whole image can be converted in a single
+        * operation) there's no point in using double-buffering. Also,
+        * the IPU's IDMAC channels allow only a single U and V plane
+        * offset shared between both buffers, but these offsets change
+        * for every tile, and therefore would have to be updated for
+        * each buffer, which is not possible. So double-buffering is
+        * impossible when either the source or destination image is in
+        * a planar format (YUV420, YUV422P, etc.).
+        */
+       ctx->double_buffering = (ctx->num_tiles > 1 &&
+                                !s_image->fmt->planar &&
+                                !d_image->fmt->planar);
+
+       if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+               ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
+                                   d_image->tile[0].size);
+               if (ret)
+                       goto out_free;
+               if (ctx->double_buffering) {
+                       ret = alloc_dma_buf(priv,
+                                           &ctx->rot_intermediate[1],
+                                           d_image->tile[0].size);
+                       if (ret)
+                               goto out_free_dmabuf0;
+               }
+       }
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       get_res = list_empty(&chan->ctx_list);
+
+       list_add_tail(&ctx->list, &chan->ctx_list);
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       if (get_res) {
+               ret = get_ipu_resources(chan);
+               if (ret)
+                       goto out_free_dmabuf1;
+       }
+
+       return ctx;
+
+out_free_dmabuf1:
+       free_dma_buf(priv, &ctx->rot_intermediate[1]);
+       spin_lock_irqsave(&chan->irqlock, flags);
+       list_del(&ctx->list);
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+out_free_dmabuf0:
+       free_dma_buf(priv, &ctx->rot_intermediate[0]);
+out_free:
+       kfree(ctx);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
+
+/*
+ * Carry out a single image conversion run. Only the physaddrs of the input
+ * and output image buffers are needed. The conversion context must have
+ * been created previously with ipu_image_convert_prepare().
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run)
+{
+       struct ipu_image_convert_chan *chan;
+       struct ipu_image_convert_priv *priv;
+       struct ipu_image_convert_ctx *ctx;
+       unsigned long flags;
+       int ret = 0;
+
+       if (!run || !run->ctx || !run->in_phys || !run->out_phys)
+               return -EINVAL;
+
+       ctx = run->ctx;
+       chan = ctx->chan;
+       priv = chan->priv;
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
+               chan->ic_task, ctx, run);
+
+       INIT_LIST_HEAD(&run->list);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       if (ctx->aborting) {
+               ret = -EIO;
+               goto unlock;
+       }
+
+       list_add_tail(&run->list, &chan->pending_q);
+
+       if (!chan->current_run) {
+               ret = do_run(run);
+               if (ret)
+                       chan->current_run = NULL;
+       }
+unlock:
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
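A hedged sketch of queueing one run against an already prepared context; the
function name and dma_addr_t parameters are illustrative, and the run's
lifetime rules follow the comments above:

    static int example_queue_one(struct ipu_image_convert_ctx *ctx,
                                 dma_addr_t in_phys, dma_addr_t out_phys)
    {
            struct ipu_image_convert_run *run;
            int ret;

            run = kzalloc(sizeof(*run), GFP_KERNEL);
            if (!run)
                    return -ENOMEM;

            run->ctx = ctx;
            run->in_phys = in_phys;
            run->out_phys = out_phys;

            /* starts immediately if the channel is idle, else queues */
            ret = ipu_image_convert_queue(run);
            if (ret)
                    kfree(run);

            /* on success, free the run after its completion callback */
            return ret;
    }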
+
+/* Abort any active or pending conversions for this context */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       struct ipu_image_convert_run *run, *active_run, *tmp;
+       unsigned long flags;
+       int run_count, ret;
+       bool need_abort;
+
+       reinit_completion(&ctx->aborted);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       /* move all remaining pending runs in this context to done_q */
+       list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+               if (run->ctx != ctx)
+                       continue;
+               run->status = -EIO;
+               list_move_tail(&run->list, &chan->done_q);
+       }
+
+       run_count = get_run_count(ctx, &chan->done_q);
+       active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
+               chan->current_run : NULL;
+
+       need_abort = (run_count || active_run);
+
+       ctx->aborting = need_abort;
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       if (!need_abort) {
+               dev_dbg(priv->ipu->dev,
+                       "%s: task %u: no abort needed for ctx %p\n",
+                       __func__, chan->ic_task, ctx);
+               return;
+       }
+
+       dev_dbg(priv->ipu->dev,
+               "%s: task %u: wait for completion: %d runs, active run %p\n",
+               __func__, chan->ic_task, run_count, active_run);
+
+       ret = wait_for_completion_timeout(&ctx->aborted,
+                                         msecs_to_jiffies(10000));
+       if (ret == 0) {
+               dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
+               force_abort(ctx);
+       }
+
+       ctx->aborting = false;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
+
+/* Unprepare image conversion context */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
+{
+       struct ipu_image_convert_chan *chan = ctx->chan;
+       struct ipu_image_convert_priv *priv = chan->priv;
+       unsigned long flags;
+       bool put_res;
+
+       /* make sure no runs are hanging around */
+       ipu_image_convert_abort(ctx);
+
+       dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
+               chan->ic_task, ctx);
+
+       spin_lock_irqsave(&chan->irqlock, flags);
+
+       list_del(&ctx->list);
+
+       put_res = list_empty(&chan->ctx_list);
+
+       spin_unlock_irqrestore(&chan->irqlock, flags);
+
+       if (put_res)
+               release_ipu_resources(chan);
+
+       free_dma_buf(priv, &ctx->rot_intermediate[1]);
+       free_dma_buf(priv, &ctx->rot_intermediate[0]);
+
+       kfree(ctx);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
+
+/*
+ * "Canned" asynchronous single image conversion. Allocates and returns
+ * a new conversion run.  On successful return the caller must free the
+ * run and call ipu_image_convert_unprepare() after conversion completes.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                 struct ipu_image *in, struct ipu_image *out,
+                 enum ipu_rotate_mode rot_mode,
+                 ipu_image_convert_cb_t complete,
+                 void *complete_context)
+{
+       struct ipu_image_convert_ctx *ctx;
+       struct ipu_image_convert_run *run;
+       int ret;
+
+       ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
+                                       complete, complete_context);
+       if (IS_ERR(ctx))
+               return ERR_CAST(ctx);
+
+       run = kzalloc(sizeof(*run), GFP_KERNEL);
+       if (!run) {
+               ipu_image_convert_unprepare(ctx);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       run->ctx = ctx;
+       run->in_phys = in->phys0;
+       run->out_phys = out->phys0;
+
+       ret = ipu_image_convert_queue(run);
+       if (ret) {
+               ipu_image_convert_unprepare(ctx);
+               kfree(run);
+               return ERR_PTR(ret);
+       }
+
+       return run;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert);
+
+/* "Canned" synchronous single image conversion */
+static void image_convert_sync_complete(struct ipu_image_convert_run *run,
+                                       void *data)
+{
+       struct completion *comp = data;
+
+       complete(comp);
+}
+
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                          struct ipu_image *in, struct ipu_image *out,
+                          enum ipu_rotate_mode rot_mode)
+{
+       struct ipu_image_convert_run *run;
+       struct completion comp;
+       int ret;
+
+       init_completion(&comp);
+
+       run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
+                               image_convert_sync_complete, &comp);
+       if (IS_ERR(run))
+               return PTR_ERR(run);
+
+       ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
+       ret = (ret == 0) ? -ETIMEDOUT : 0;
+
+       ipu_image_convert_unprepare(run->ctx);
+       kfree(run);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
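A blocking-conversion sketch, e.g. for a self-test path; the wrapper is
hypothetical and assumes the caller has filled in both ipu_image structs
(pixelformat, width/height, phys0):

    static int example_convert_blocking(struct ipu_soc *ipu,
                                        struct ipu_image *in,
                                        struct ipu_image *out)
    {
            /* clamp/align the requested sizes to IPU limits first */
            ipu_image_convert_adjust(in, out, IPU_ROTATE_90_RIGHT);

            /* returns -ETIMEDOUT if the conversion exceeds 10 seconds */
            return ipu_image_convert_sync(ipu, IC_TASK_POST_PROCESSOR,
                                          in, out, IPU_ROTATE_90_RIGHT);
    }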
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
+{
+       struct ipu_image_convert_priv *priv;
+       int i;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       ipu->image_convert_priv = priv;
+       priv->ipu = ipu;
+
+       for (i = 0; i < IC_NUM_TASKS; i++) {
+               struct ipu_image_convert_chan *chan = &priv->chan[i];
+
+               chan->ic_task = i;
+               chan->priv = priv;
+               chan->dma_ch = &image_convert_dma_chan[i];
+               chan->out_eof_irq = -1;
+               chan->rot_out_eof_irq = -1;
+
+               spin_lock_init(&chan->irqlock);
+               INIT_LIST_HEAD(&chan->ctx_list);
+               INIT_LIST_HEAD(&chan->pending_q);
+               INIT_LIST_HEAD(&chan->done_q);
+       }
+
+       return 0;
+}
+
+void ipu_image_convert_exit(struct ipu_soc *ipu)
+{
+}
index fd47f8f..22e47b6 100644 (file)
@@ -75,6 +75,33 @@ struct ipu_soc;
 #define IPU_INT_CTRL(n)                IPU_CM_REG(0x003C + 4 * (n))
 #define IPU_INT_STAT(n)                IPU_CM_REG(0x0200 + 4 * (n))
 
+/* FS_PROC_FLOW1 */
+#define FS_PRPENC_ROT_SRC_SEL_MASK     (0xf << 0)
+#define FS_PRPENC_ROT_SRC_SEL_ENC              (0x7 << 0)
+#define FS_PRPVF_ROT_SRC_SEL_MASK      (0xf << 8)
+#define FS_PRPVF_ROT_SRC_SEL_VF                        (0x8 << 8)
+#define FS_PP_SRC_SEL_MASK             (0xf << 12)
+#define FS_PP_ROT_SRC_SEL_MASK         (0xf << 16)
+#define FS_PP_ROT_SRC_SEL_PP                   (0x5 << 16)
+#define FS_VDI1_SRC_SEL_MASK           (0x3 << 20)
+#define FS_VDI3_SRC_SEL_MASK           (0x3 << 20)
+#define FS_PRP_SRC_SEL_MASK            (0xf << 24)
+#define FS_VDI_SRC_SEL_MASK            (0x3 << 28)
+#define FS_VDI_SRC_SEL_CSI_DIRECT              (0x1 << 28)
+#define FS_VDI_SRC_SEL_VDOA                    (0x2 << 28)
+
+/* FS_PROC_FLOW2 */
+#define FS_PRP_ENC_DEST_SEL_MASK       (0xf << 0)
+#define FS_PRP_ENC_DEST_SEL_IRT_ENC            (0x1 << 0)
+#define FS_PRPVF_DEST_SEL_MASK         (0xf << 4)
+#define FS_PRPVF_DEST_SEL_IRT_VF               (0x1 << 4)
+#define FS_PRPVF_ROT_DEST_SEL_MASK     (0xf << 8)
+#define FS_PP_DEST_SEL_MASK            (0xf << 12)
+#define FS_PP_DEST_SEL_IRT_PP                  (0x3 << 12)
+#define FS_PP_ROT_DEST_SEL_MASK                (0xf << 16)
+#define FS_PRPENC_ROT_DEST_SEL_MASK    (0xf << 20)
+#define FS_PRP_DEST_SEL_MASK           (0xf << 24)
+
 #define IPU_DI0_COUNTER_RELEASE                        (1 << 24)
 #define IPU_DI1_COUNTER_RELEASE                        (1 << 25)
 
@@ -138,6 +165,8 @@ struct ipu_dc_priv;
 struct ipu_dmfc_priv;
 struct ipu_di;
 struct ipu_ic_priv;
+struct ipu_vdi;
+struct ipu_image_convert_priv;
 struct ipu_smfc_priv;
 
 struct ipu_devtype;
@@ -170,6 +199,8 @@ struct ipu_soc {
        struct ipu_di           *di_priv[2];
        struct ipu_csi          *csi_priv[2];
        struct ipu_ic_priv      *ic_priv;
+       struct ipu_vdi          *vdi_priv;
+       struct ipu_image_convert_priv *image_convert_priv;
        struct ipu_smfc_priv    *smfc_priv;
 };
 
@@ -200,6 +231,13 @@ int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
                unsigned long base, unsigned long tpmem_base);
 void ipu_ic_exit(struct ipu_soc *ipu);
 
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+                unsigned long base, u32 module);
+void ipu_vdi_exit(struct ipu_soc *ipu);
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
+void ipu_image_convert_exit(struct ipu_soc *ipu);
+
 int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
                unsigned long base, u32 module, struct clk *ipu_clk);
 void ipu_di_exit(struct ipu_soc *ipu, int id);
diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c
new file mode 100644 (file)
index 0000000..f27bf5a
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#include <linux/io.h>
+#include "ipu-prv.h"
+
+struct ipu_vdi {
+       void __iomem *base;
+       u32 module;
+       spinlock_t lock;
+       int use_count;
+       struct ipu_soc *ipu;
+};
+
+/* VDI Register Offsets */
+#define VDI_FSIZE 0x0000
+#define VDI_C     0x0004
+
+/* VDI Register Fields */
+#define VDI_C_CH_420             (0 << 1)
+#define VDI_C_CH_422             (1 << 1)
+#define VDI_C_MOT_SEL_MASK       (0x3 << 2)
+#define VDI_C_MOT_SEL_FULL       (2 << 2)
+#define VDI_C_MOT_SEL_LOW        (1 << 2)
+#define VDI_C_MOT_SEL_MED        (0 << 2)
+#define VDI_C_BURST_SIZE1_4      (3 << 4)
+#define VDI_C_BURST_SIZE2_4      (3 << 8)
+#define VDI_C_BURST_SIZE3_4      (3 << 12)
+#define VDI_C_BURST_SIZE_MASK    0xF
+#define VDI_C_BURST_SIZE1_OFFSET 4
+#define VDI_C_BURST_SIZE2_OFFSET 8
+#define VDI_C_BURST_SIZE3_OFFSET 12
+#define VDI_C_VWM1_SET_1         (0 << 16)
+#define VDI_C_VWM1_SET_2         (1 << 16)
+#define VDI_C_VWM1_CLR_2         (1 << 19)
+#define VDI_C_VWM3_SET_1         (0 << 22)
+#define VDI_C_VWM3_SET_2         (1 << 22)
+#define VDI_C_VWM3_CLR_2         (1 << 25)
+#define VDI_C_TOP_FIELD_MAN_1    (1 << 30)
+#define VDI_C_TOP_FIELD_AUTO_1   (1 << 31)
+
+static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
+{
+       return readl(vdi->base + offset);
+}
+
+static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
+                                unsigned int offset)
+{
+       writel(value, vdi->base + offset);
+}
+
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
+{
+       bool top_field_0 = false;
+       unsigned long flags;
+       u32 reg;
+
+       switch (field) {
+       case V4L2_FIELD_INTERLACED_TB:
+       case V4L2_FIELD_SEQ_TB:
+       case V4L2_FIELD_TOP:
+               top_field_0 = true;
+               break;
+       case V4L2_FIELD_INTERLACED_BT:
+       case V4L2_FIELD_SEQ_BT:
+       case V4L2_FIELD_BOTTOM:
+               top_field_0 = false;
+               break;
+       default:
+               top_field_0 = (std & V4L2_STD_525_60) ? true : false;
+               break;
+       }
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       reg = ipu_vdi_read(vdi, VDI_C);
+       if (top_field_0)
+               reg &= ~VDI_C_TOP_FIELD_MAN_1;
+       else
+               reg |= VDI_C_TOP_FIELD_MAN_1;
+       ipu_vdi_write(vdi, reg, VDI_C);
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
+
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
+{
+       unsigned long flags;
+       u32 reg;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       reg = ipu_vdi_read(vdi, VDI_C);
+
+       reg &= ~VDI_C_MOT_SEL_MASK;
+
+       switch (motion_sel) {
+       case MED_MOTION:
+               reg |= VDI_C_MOT_SEL_MED;
+               break;
+       case HIGH_MOTION:
+               reg |= VDI_C_MOT_SEL_FULL;
+               break;
+       default:
+               reg |= VDI_C_MOT_SEL_LOW;
+               break;
+       }
+
+       ipu_vdi_write(vdi, reg, VDI_C);
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
+
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
+{
+       unsigned long flags;
+       u32 pixel_fmt, reg;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       reg = ((yres - 1) << 16) | (xres - 1);
+       ipu_vdi_write(vdi, reg, VDI_FSIZE);
+
+       /*
+        * Full motion, only vertical filter is used.
+        * Burst size is 4 accesses.
+        */
+       if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
+           code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+           code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+           code == MEDIA_BUS_FMT_YUYV8_1X16)
+               pixel_fmt = VDI_C_CH_422;
+       else
+               pixel_fmt = VDI_C_CH_420;
+
+       reg = ipu_vdi_read(vdi, VDI_C);
+       reg |= pixel_fmt;
+       reg |= VDI_C_BURST_SIZE2_4;
+       reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
+       reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
+       ipu_vdi_write(vdi, reg, VDI_C);
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_setup);
+
+void ipu_vdi_unsetup(struct ipu_vdi *vdi)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+       ipu_vdi_write(vdi, 0, VDI_FSIZE);
+       ipu_vdi_write(vdi, 0, VDI_C);
+       spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
+
+int ipu_vdi_enable(struct ipu_vdi *vdi)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       if (!vdi->use_count)
+               ipu_module_enable(vdi->ipu, vdi->module);
+
+       vdi->use_count++;
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_enable);
+
+int ipu_vdi_disable(struct ipu_vdi *vdi)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vdi->lock, flags);
+
+       if (vdi->use_count) {
+               if (!--vdi->use_count)
+                       ipu_module_disable(vdi->ipu, vdi->module);
+       }
+
+       spin_unlock_irqrestore(&vdi->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_disable);
+
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
+{
+       return ipu->vdi_priv;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_get);
+
+void ipu_vdi_put(struct ipu_vdi *vdi)
+{
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_put);
+
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+                unsigned long base, u32 module)
+{
+       struct ipu_vdi *vdi;
+
+       vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
+       if (!vdi)
+               return -ENOMEM;
+
+       ipu->vdi_priv = vdi;
+
+       spin_lock_init(&vdi->lock);
+       vdi->module = module;
+       vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
+       if (!vdi->base)
+               return -ENOMEM;
+
+       dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
+       vdi->ipu = ipu;
+
+       return 0;
+}
+
+void ipu_vdi_exit(struct ipu_soc *ipu)
+{
+}
index 94eb138..c53dc90 100644 (file)
@@ -168,6 +168,26 @@ void drm_printk(const char *level, unsigned int category,
 /** \name Macros to make printk easier */
 /*@{*/
 
+#define _DRM_PRINTK(once, level, fmt, ...)                             \
+       do {                                                            \
+               printk##once(KERN_##level "[" DRM_NAME "] " fmt,        \
+                            ##__VA_ARGS__);                            \
+       } while (0)
+
+#define DRM_INFO(fmt, ...)                                             \
+       _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...)                                             \
+       _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...)                                             \
+       _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...)                                                \
+       _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...)                                                \
+       _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...)                                                \
+       _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
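A usage sketch for the new helpers: each expands to printk() (or
printk_once() for the _ONCE variants) with the KERN_<level> and "[drm]"
prefixes prepended. The function and its `pipe` parameter are assumed for
illustration:

    static void example_report(int pipe)
    {
            DRM_WARN("vblank wait timed out on crtc %d\n", pipe);
            DRM_NOTE_ONCE("legacy userspace detected\n");
    }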
 /**
  * Error output.
  *
@@ -202,8 +222,6 @@ void drm_printk(const char *level, unsigned int category,
 #define DRM_DEV_INFO(dev, fmt, ...)                                    \
        drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt,  \
                       ##__VA_ARGS__)
-#define DRM_INFO(fmt, ...)                                             \
-       drm_printk(KERN_INFO, DRM_UT_NONE, __func__, "", fmt, ##__VA_ARGS__)
 
 #define DRM_DEV_INFO_ONCE(dev, fmt, ...)                               \
 ({                                                                     \
@@ -213,7 +231,6 @@ void drm_printk(const char *level, unsigned int category,
                DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);                  \
        }                                                               \
 })
-#define DRM_INFO_ONCE(fmt, ...) DRM_DEV_INFO_ONCE(NULL, fmt, ##__VA_ARGS__)
 
 /**
  * Debug output.
@@ -379,7 +396,6 @@ struct drm_file {
        unsigned is_master:1;
 
        struct pid *pid;
-       kuid_t uid;
        drm_magic_t magic;
        struct list_head lhead;
        struct drm_minor *minor;
index 856a9c8..9701f2d 100644 (file)
 
 #include <drm/drm_crtc.h>
 
+/**
+ * struct drm_crtc_commit - track modeset commits on a CRTC
+ *
+ * This structure is used to track pending modeset changes and atomic commit on
+ * a per-CRTC basis. Since updating the list should never block this structure
+ * is reference counted to allow waiters to safely wait on an event to complete,
+ * without holding any locks.
+ *
+ * It has 3 different events in total to allow fine-grained synchronization
+ * between outstanding updates::
+ *
+ *     atomic commit thread                    hardware
+ *
+ *     write new state into hardware   ---->   ...
+ *     signal hw_done
+ *                                             switch to new state on next
+ *     ...                                     v/hblank
+ *
+ *     wait for buffers to show up             ...
+ *
+ *     ...                                     send completion irq
+ *                                             irq handler signals flip_done
+ *     cleanup old buffers
+ *
+ *     signal cleanup_done
+ *
+ *     wait for flip_done              <----
+ *     clean up atomic state
+ *
+ * The important bit to know is that cleanup_done is the terminal event, but the
+ * ordering between flip_done and hw_done is entirely up to the specific driver
+ * and modeset state change.
+ *
+ * For an implementation of how to use this look at
+ * drm_atomic_helper_setup_commit() from the atomic helper library.
+ */
+struct drm_crtc_commit {
+       /**
+        * @crtc:
+        *
+        * DRM CRTC for this commit.
+        */
+       struct drm_crtc *crtc;
+
+       /**
+        * @ref:
+        *
+        * Reference count for this structure. Needed to allow blocking on
+        * completions without the risk of the completion disappearing
+        * meanwhile.
+        */
+       struct kref ref;
+
+       /**
+        * @flip_done:
+        *
+        * Will be signaled when the hardware has flipped to the new set of
+        * buffers. Signals at the same time as when the drm event for this
+        * commit is sent to userspace, or when an out-fence is signalled. Note
+        * that for most hardware, in most cases this happens after @hw_done is
+        * signalled.
+        */
+       struct completion flip_done;
+
+       /**
+        * @hw_done:
+        *
+        * Will be signalled when all hw register changes for this commit have
+        * been written out. Especially when disabling a pipe this can be much
+        * later than than @flip_done, since that can signal already when the
+        * later than @flip_done, since that can signal already when the
+        * I/O is required.
+        *
+        * Note that this does not need to include separately reference-counted
+        * resources like backing storage buffer pinning, or runtime pm
+        * management.
+        */
+       struct completion hw_done;
+
+       /**
+        * @cleanup_done:
+        *
+        * Will be signalled after old buffers have been cleaned up by calling
+        * drm_atomic_helper_cleanup_planes(). Since this can only happen after
+        * a vblank wait has completed, it might be a bit later. This completion is
+        * useful to throttle updates and avoid hardware updates getting ahead
+        * of the buffer cleanup too much.
+        */
+       struct completion cleanup_done;
+
+       /**
+        * @commit_entry:
+        *
+        * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+        */
+       struct list_head commit_entry;
+
+       /**
+        * @event:
+        *
+        * &drm_pending_vblank_event pointer to clean up private events.
+        */
+       struct drm_pending_vblank_event *event;
+};
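A minimal waiter sketch, assuming the caller found `commit` under the lock
protecting the commit list (linux/completion.h and the
drm_crtc_commit_get()/drm_crtc_commit_put() helpers declared below are
assumed available):

    static void example_wait_for_flip(struct drm_crtc_commit *commit)
    {
            /* the extra reference keeps the commit alive across the wait */
            drm_crtc_commit_get(commit);

            wait_for_completion(&commit->flip_done);

            drm_crtc_commit_put(commit);
    }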
+
+struct __drm_planes_state {
+       struct drm_plane *ptr;
+       struct drm_plane_state *state;
+};
+
+struct __drm_crtcs_state {
+       struct drm_crtc *ptr;
+       struct drm_crtc_state *state;
+       struct drm_crtc_commit *commit;
+};
+
+struct __drm_connnectors_state {
+       struct drm_connector *ptr;
+       struct drm_connector_state *state;
+};
+
+/**
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @allow_modeset: allow full modeset
+ * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
+ * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
+ * @planes: pointer to array of structures with per-plane data
+ * @crtcs: pointer to array of CRTC pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of structures with per-connector data
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+       struct drm_device *dev;
+       bool allow_modeset : 1;
+       bool legacy_cursor_update : 1;
+       bool legacy_set_config : 1;
+       struct __drm_planes_state *planes;
+       struct __drm_crtcs_state *crtcs;
+       int num_connector;
+       struct __drm_connnectors_state *connectors;
+
+       struct drm_modeset_acquire_ctx *acquire_ctx;
+
+       /**
+        * @commit_work:
+        *
+        * Work item which can be used by the driver or helpers to execute the
+        * commit without blocking.
+        */
+       struct work_struct commit_work;
+};
+
 void drm_crtc_commit_put(struct drm_crtc_commit *commit);
 static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
 {
index f866828..7ff92b0 100644 (file)
@@ -45,8 +45,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock);
 
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
-                                       struct drm_atomic_state *state);
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+                                       struct drm_atomic_state *state,
+                                       bool pre_swap);
 bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
                                           struct drm_atomic_state *old_state,
                                           struct drm_crtc *crtc);
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
new file mode 100644 (file)
index 0000000..36baa17
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BLEND_H__
+#define __DRM_BLEND_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+
+struct drm_device;
+struct drm_atomic_state;
+
+/*
+ * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
+ * specified amount in degrees in the counter-clockwise direction. DRM_REFLECT_X
+ * and DRM_REFLECT_Y reflect the image along the specified axis prior to
+ * rotation.
+ *
+ * WARNING: These defines are UABI since they're exposed in the rotation
+ * property.
+ */
+#define DRM_ROTATE_0   BIT(0)
+#define DRM_ROTATE_90  BIT(1)
+#define DRM_ROTATE_180 BIT(2)
+#define DRM_ROTATE_270 BIT(3)
+#define DRM_ROTATE_MASK (DRM_ROTATE_0   | DRM_ROTATE_90 | \
+                        DRM_ROTATE_180 | DRM_ROTATE_270)
+#define DRM_REFLECT_X  BIT(4)
+#define DRM_REFLECT_Y  BIT(5)
+#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+                                                      unsigned int supported_rotations);
+unsigned int drm_rotation_simplify(unsigned int rotation,
+                                  unsigned int supported_rotations);
+
+int drm_plane_create_zpos_property(struct drm_plane *plane,
+                                  unsigned int zpos,
+                                  unsigned int min, unsigned int max);
+int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
+                                            unsigned int zpos);
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+                             struct drm_atomic_state *state);
+#endif
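A hedged plane-initialization sketch using this header's rotation API;
drm_object_attach_property() comes from drm_mode_object.h, and the function
name is illustrative:

    static int example_init_rotation(struct drm_device *dev,
                                     struct drm_plane *plane)
    {
            struct drm_property *prop;

            /* advertise 0/180 degree rotation plus X reflection */
            prop = drm_mode_create_rotation_property(dev,
                                                     DRM_ROTATE_0 |
                                                     DRM_ROTATE_180 |
                                                     DRM_REFLECT_X);
            if (!prop)
                    return -ENOMEM;

            drm_object_attach_property(&plane->base, prop, DRM_ROTATE_0);
            return 0;
    }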
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
new file mode 100644 (file)
index 0000000..530a1d6
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BRIDGE_H__
+#define __DRM_BRIDGE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_modes.h>
+
+struct drm_bridge;
+
+/**
+ * struct drm_bridge_funcs - drm_bridge control functions
+ */
+struct drm_bridge_funcs {
+       /**
+        * @attach:
+        *
+        * This callback is invoked whenever our bridge is being attached to a
+        * &drm_encoder.
+        *
+        * The attach callback is optional.
+        *
+        * RETURNS:
+        *
+        * Zero on success, error code on failure.
+        */
+       int (*attach)(struct drm_bridge *bridge);
+
+       /**
+        * @detach:
+        *
+        * This callback is invoked whenever our bridge is being detached from a
+        * &drm_encoder.
+        *
+        * The detach callback is optional.
+        */
+       void (*detach)(struct drm_bridge *bridge);
+
+       /**
+        * @mode_fixup:
+        *
+        * This callback is used to validate and adjust a mode. The parameter
+        * mode is the display mode that should be fed to the next element in
+        * the display chain, either the final &drm_connector or the next
+        * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
+        * requires. It can be modified by this callback and does not need to
+        * match mode.
+        *
+        * This is the only hook that allows a bridge to reject a modeset. If
+        * this function passes, all other callbacks must succeed for this
+        * configuration.
+        *
+        * The mode_fixup callback is optional.
+        *
+        * NOTE:
+        *
+        * This function is called in the check phase of atomic modesets, which
+        * can be aborted for any reason (including on userspace's request to
+        * just check whether a configuration would be possible). Drivers MUST
+        * NOT touch any persistent state (hardware or software) or data
+        * structures except the passed in @state parameter.
+        * structures except the passed-in @adjusted_mode parameter.
+        * RETURNS:
+        *
+        * True if an acceptable configuration is possible, false if the modeset
+        * operation should be rejected.
+        */
+       bool (*mode_fixup)(struct drm_bridge *bridge,
+                          const struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+       /**
+        * @disable:
+        *
+        * This callback should disable the bridge. It is called right before
+        * the preceding element in the display pipe is disabled. If the
+        * preceding element is a bridge this means it's called before that
+        * bridge's ->disable() function. If the preceding element is a
+        * &drm_encoder it's called right before the encoder's ->disable(),
+        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The bridge can assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is still running when this callback is called.
+        *
+        * The disable callback is optional.
+        */
+       void (*disable)(struct drm_bridge *bridge);
+
+       /**
+        * @post_disable:
+        *
+        * This callback should disable the bridge. It is called right after
+        * the preceding element in the display pipe is disabled. If the
+        * preceding element is a bridge this means it's called after that
+        * bridge's ->post_disable() function. If the preceding element is a
+        * &drm_encoder it's called right after the encoder's ->disable(),
+        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The bridge must assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is no longer running when this callback is
+        * called.
+        *
+        * The post_disable callback is optional.
+        */
+       void (*post_disable)(struct drm_bridge *bridge);
+
+       /**
+        * @mode_set:
+        *
+        * This callback should set the given mode on the bridge. It is called
+        * after the ->mode_set() callback for the preceding element in the
+        * display pipeline has been called already. The display pipe (i.e.
+        * clocks and timing signals) is off when this function is called.
+        */
+       void (*mode_set)(struct drm_bridge *bridge,
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
+       /**
+        * @pre_enable:
+        *
+        * This callback should enable the bridge. It is called right before
+        * the preceding element in the display pipe is enabled. If the
+        * preceding element is a bridge this means it's called before that
+        * bridge's ->pre_enable() function. If the preceding element is a
+        * &drm_encoder it's called right before the encoder's ->enable(),
+        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The display pipe (i.e. clocks and timing signals) feeding this bridge
+        * will not yet be running when this callback is called. The bridge must
+        * not enable the display link feeding the next bridge in the chain (if
+        * there is one) when this callback is called.
+        *
+        * The pre_enable callback is optional.
+        */
+       void (*pre_enable)(struct drm_bridge *bridge);
+
+       /**
+        * @enable:
+        *
+        * This callback should enable the bridge. It is called right after
+        * the preceding element in the display pipe is enabled. If the
+        * preceding element is a bridge this means it's called after that
+        * bridge's ->enable() function. If the preceding element is a
+        * &drm_encoder it's called right after the encoder's ->enable(),
+        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+        *
+        * The bridge can assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is running when this callback is called. This
+        * callback must enable the display link feeding the next bridge in the
+        * chain if there is one.
+        *
+        * The enable callback is optional.
+        */
+       void (*enable)(struct drm_bridge *bridge);
+};
+
+/**
+ * struct drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @encoder: encoder to which this bridge is connected
+ * @next: the next bridge in the encoder chain
+ * @of_node: device node pointer to the bridge
+ * @list: to keep track of all added bridges
+ * @funcs: control functions
+ * @driver_private: pointer to the bridge driver's internal context
+ */
+struct drm_bridge {
+       struct drm_device *dev;
+       struct drm_encoder *encoder;
+       struct drm_bridge *next;
+#ifdef CONFIG_OF
+       struct device_node *of_node;
+#endif
+       struct list_head list;
+
+       const struct drm_bridge_funcs *funcs;
+       void *driver_private;
+};
+
+int drm_bridge_add(struct drm_bridge *bridge);
+void drm_bridge_remove(struct drm_bridge *bridge);
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+void drm_bridge_detach(struct drm_bridge *bridge);
+
+bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+void drm_bridge_disable(struct drm_bridge *bridge);
+void drm_bridge_post_disable(struct drm_bridge *bridge);
+void drm_bridge_mode_set(struct drm_bridge *bridge,
+                       struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+void drm_bridge_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_enable(struct drm_bridge *bridge);
+
+#endif
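A minimal bridge sketch under the "all hooks are optional" rule documented
above; every identifier except struct drm_bridge_funcs, struct drm_bridge and
drm_bridge_add() is illustrative:

    static void example_bridge_enable(struct drm_bridge *bridge)
    {
            /* e.g. assert an enable GPIO or start the link here */
    }

    static const struct drm_bridge_funcs example_bridge_funcs = {
            .enable = example_bridge_enable,
    };

    /* in the bridge driver's probe (illustrative):
     *   bridge->funcs = &example_bridge_funcs;
     *   ret = drm_bridge_add(bridge);
     */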
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
new file mode 100644 (file)
index 0000000..c767238
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_COLOR_MGMT_H__
+#define __DRM_COLOR_MGMT_H__
+
+#include <linux/ctype.h>
+
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+                               uint degamma_lut_size,
+                               bool has_ctm,
+                               uint gamma_lut_size);
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                int gamma_size);
+
+/**
+ * drm_color_lut_extract - clamp&round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by the user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+static inline uint32_t drm_color_lut_extract(uint32_t user_input,
+                                            uint32_t bit_precision)
+{
+       uint32_t val = user_input;
+       uint32_t max = 0xffff >> (16 - bit_precision);
+
+       /* Round only if we're not using full precision. */
+       if (bit_precision < 16) {
+               val += 1UL << (16 - bit_precision - 1);
+               val >>= 16 - bit_precision;
+       }
+
+       return clamp_val(val, 0, max);
+}
+
+#endif
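A worked example of the round-then-shift, with assumed values (a 10-bit
hardware LUT and the 16-bit mid-scale input 0x8000):

    u32 v = drm_color_lut_extract(0x8000, 10);
    /* val = 0x8000 + (1 << 5) = 0x8020; 0x8020 >> 6 = 0x200 = 512;
     * max = 0xffff >> 6 = 1023, so v == 512 */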
index 66b7d67..51a15de 100644 (file)
 #include <linux/ctype.h>
 #include <drm/drm_mode_object.h>
 
+#include <uapi/drm/drm_mode.h>
+
+struct drm_device;
+
 struct drm_connector_helper_funcs;
 struct drm_device;
 struct drm_crtc;
@@ -181,14 +185,19 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info,
 /**
  * struct drm_connector_state - mutable connector state
  * @connector: backpointer to the connector
- * @crtc: CRTC to connect connector to, NULL if disabled
  * @best_encoder: can be used by helpers and drivers to select the encoder
  * @state: backpointer to global drm_atomic_state
  */
 struct drm_connector_state {
        struct drm_connector *connector;
 
-       struct drm_crtc *crtc;  /* do not write directly, use drm_atomic_set_crtc_for_connector() */
+       /**
+        * @crtc: CRTC to connect connector to, NULL if disabled.
+        *
+        * Do not change this directly, use drm_atomic_set_crtc_for_connector()
+        * instead.
+        */
+       struct drm_crtc *crtc;
 
        struct drm_encoder *best_encoder;
 
@@ -744,4 +753,19 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
 int drm_mode_connector_set_tile_property(struct drm_connector *connector);
 int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                            const struct edid *edid);
+
+/**
+ * drm_for_each_connector - iterate over all connectors
+ * @connector: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all connectors of @dev.
+ */
+#define drm_for_each_connector(connector, dev) \
+       for (assert_drm_connector_list_read_locked(&(dev)->mode_config),        \
+            connector = list_first_entry(&(dev)->mode_config.connector_list,   \
+                                         struct drm_connector, head);          \
+            &connector->head != (&(dev)->mode_config.connector_list);          \
+            connector = list_next_entry(connector, head))
+
 #endif
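
A hedged sketch of the iterator above: counting connectors currently bound to
a CRTC, with mode_config.mutex held to satisfy the embedded locking assert.

static unsigned int example_count_enabled(struct drm_device *dev)
{
	struct drm_connector *connector;
	unsigned int n = 0;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev) {
		if (connector->state && connector->state->crtc)
			n++;
	}
	mutex_unlock(&dev->mode_config.mutex);

	return n;
}
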
diff --git a/include/drm/drm_core.h b/include/drm/drm_core.h
deleted file mode 100644 (file)
index 4e75238..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#define CORE_AUTHOR            "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
-
-#define CORE_NAME              "drm"
-#define CORE_DESC              "DRM shared core routines"
-#define CORE_DATE              "20060810"
-
-#define DRM_IF_MAJOR   1
-#define DRM_IF_MINOR   4
-
-#define CORE_MAJOR     1
-#define CORE_MINOR     1
-#define CORE_PATCHLEVEL 0
index 8ca71d6..a544b75 100644 (file)
 #include <drm/drm_connector.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_property.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_color_mgmt.h>
 
 struct drm_device;
 struct drm_mode_set;
@@ -60,21 +65,6 @@ static inline uint64_t I642U64(int64_t val)
        return (uint64_t)*((uint64_t *)&val);
 }
 
-/*
- * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
- * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and
- * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation
- */
-#define DRM_ROTATE_0   BIT(0)
-#define DRM_ROTATE_90  BIT(1)
-#define DRM_ROTATE_180 BIT(2)
-#define DRM_ROTATE_270 BIT(3)
-#define DRM_ROTATE_MASK (DRM_ROTATE_0   | DRM_ROTATE_90 | \
-                        DRM_ROTATE_180 | DRM_ROTATE_270)
-#define DRM_REFLECT_X  BIT(4)
-#define DRM_REFLECT_Y  BIT(5)
-#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
-
 /* data corresponds to displayid vend/prod/serial */
 struct drm_tile_group {
        struct kref refcount;
@@ -654,693 +644,6 @@ struct drm_crtc {
        struct drm_modeset_acquire_ctx *acquire_ctx;
 };
 
-/**
- * struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc: currently bound CRTC, NULL if disabled
- * @fb: currently bound framebuffer
- * @fence: optional fence to wait for before scanning out @fb
- * @crtc_x: left position of visible portion of plane on crtc
- * @crtc_y: upper position of visible portion of plane on crtc
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- *     plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- *     plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- *     where N is the number of active planes for given crtc
- * @src: clipped source coordinates of the plane (in 16.16)
- * @dst: clipped destination coordinates of the plane
- * @visible: visibility of the plane
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_plane_state {
-       struct drm_plane *plane;
-
-       struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
-       struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
-       struct fence *fence;
-
-       /* Signed dest location allows it to be partially off screen */
-       int32_t crtc_x, crtc_y;
-       uint32_t crtc_w, crtc_h;
-
-       /* Source values are 16.16 fixed point */
-       uint32_t src_x, src_y;
-       uint32_t src_h, src_w;
-
-       /* Plane rotation */
-       unsigned int rotation;
-
-       /* Plane zpos */
-       unsigned int zpos;
-       unsigned int normalized_zpos;
-
-       /* Clipped coordinates */
-       struct drm_rect src, dst;
-
-       /*
-        * Is the plane actually visible? Can be false even
-        * if fb!=NULL and crtc!=NULL, due to clipping.
-        */
-       bool visible;
-
-       struct drm_atomic_state *state;
-};
-
-
-/**
- * struct drm_plane_funcs - driver plane control functions
- */
-struct drm_plane_funcs {
-       /**
-        * @update_plane:
-        *
-        * This is the legacy entry point to enable and configure the plane for
-        * the given CRTC and framebuffer. It is never called to disable the
-        * plane, i.e. the passed-in crtc and fb paramters are never NULL.
-        *
-        * The source rectangle in frame buffer memory coordinates is given by
-        * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
-        * values). Devices that don't support subpixel plane coordinates can
-        * ignore the fractional part.
-        *
-        * The destination rectangle in CRTC coordinates is given by the
-        * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
-        * Devices scale the source rectangle to the destination rectangle. If
-        * scaling is not supported, and the source rectangle size doesn't match
-        * the destination rectangle size, the driver must return a
-        * -<errorname>EINVAL</errorname> error.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_update_plane() to implement this hook.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*update_plane)(struct drm_plane *plane,
-                           struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                           int crtc_x, int crtc_y,
-                           unsigned int crtc_w, unsigned int crtc_h,
-                           uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h);
-
-       /**
-        * @disable_plane:
-        *
-        * This is the legacy entry point to disable the plane. The DRM core
-        * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
-        * with the frame buffer ID set to 0.  Disabled planes must not be
-        * processed by the CRTC.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_disable_plane() to implement this hook.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*disable_plane)(struct drm_plane *plane);
-
-       /**
-        * @destroy:
-        *
-        * Clean up plane resources. This is only called at driver unload time
-        * through drm_mode_config_cleanup() since a plane cannot be hotplugged
-        * in DRM.
-        */
-       void (*destroy)(struct drm_plane *plane);
-
-       /**
-        * @reset:
-        *
-        * Reset plane hardware and software state to off. This function isn't
-        * called by the core directly, only through drm_mode_config_reset().
-        * It's not a helper hook only for historical reasons.
-        *
-        * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
-        * atomic state using this hook.
-        */
-       void (*reset)(struct drm_plane *plane);
-
-       /**
-        * @set_property:
-        *
-        * This is the legacy entry point to update a property attached to the
-        * plane.
-        *
-        * Drivers implementing atomic modeset should use
-        * drm_atomic_helper_plane_set_property() to implement this hook.
-        *
-        * This callback is optional if the driver does not support any legacy
-        * driver-private properties.
-        *
-        * RETURNS:
-        *
-        * 0 on success or a negative error code on failure.
-        */
-       int (*set_property)(struct drm_plane *plane,
-                           struct drm_property *property, uint64_t val);
-
-       /**
-        * @atomic_duplicate_state:
-        *
-        * Duplicate the current atomic state for this plane and return it.
-        * The core and helpers gurantee that any atomic state duplicated with
-        * this hook and still owned by the caller (i.e. not transferred to the
-        * driver by calling ->atomic_commit() from struct
-        * &drm_mode_config_funcs) will be cleaned up by calling the
-        * @atomic_destroy_state hook in this structure.
-        *
-        * Atomic drivers which don't subclass struct &drm_plane_state should use
-        * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
-        * state structure to extend it with driver-private state should use
-        * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
-        * duplicated in a consistent fashion across drivers.
-        *
-        * It is an error to call this hook before plane->state has been
-        * initialized correctly.
-        *
-        * NOTE:
-        *
-        * If the duplicate state references refcounted resources this hook must
-        * acquire a reference for each of them. The driver must release these
-        * references again in @atomic_destroy_state.
-        *
-        * RETURNS:
-        *
-        * Duplicated atomic state or NULL when the allocation failed.
-        */
-       struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
-
-       /**
-        * @atomic_destroy_state:
-        *
-        * Destroy a state duplicated with @atomic_duplicate_state and release
-        * or unreference all resources it references
-        */
-       void (*atomic_destroy_state)(struct drm_plane *plane,
-                                    struct drm_plane_state *state);
-
-       /**
-        * @atomic_set_property:
-        *
-        * Decode a driver-private property value and store the decoded value
-        * into the passed-in state structure. Since the atomic core decodes all
-        * standardized properties (even for extensions beyond the core set of
-        * properties which might not be implemented by all drivers) this
-        * requires drivers to subclass the state structure.
-        *
-        * Such driver-private properties should really only be implemented for
-        * truly hardware/vendor specific state. Instead it is preferred to
-        * standardize atomic extension and decode the properties used to expose
-        * such an extension in the core.
-        *
-        * Do not call this function directly, use
-        * drm_atomic_plane_set_property() instead.
-        *
-        * This callback is optional if the driver does not support any
-        * driver-private atomic properties.
-        *
-        * NOTE:
-        *
-        * This function is called in the state assembly phase of atomic
-        * modesets, which can be aborted for any reason (including on
-        * userspace's request to just check whether a configuration would be
-        * possible). Drivers MUST NOT touch any persistent state (hardware or
-        * software) or data structures except the passed in @state parameter.
-        *
-        * Also since userspace controls in which order properties are set this
-        * function must not do any input validation (since the state update is
-        * incomplete and hence likely inconsistent). Instead any such input
-        * validation must be done in the various atomic_check callbacks.
-        *
-        * RETURNS:
-        *
-        * 0 if the property has been found, -EINVAL if the property isn't
-        * implemented by the driver (which shouldn't ever happen, the core only
-        * asks for properties attached to this plane). No other validation is
-        * allowed by the driver. The core already checks that the property
-        * value is within the range (integer, valid enum value, ...) the driver
-        * set when registering the property.
-        */
-       int (*atomic_set_property)(struct drm_plane *plane,
-                                  struct drm_plane_state *state,
-                                  struct drm_property *property,
-                                  uint64_t val);
-
-       /**
-        * @atomic_get_property:
-        *
-        * Reads out the decoded driver-private property. This is used to
-        * implement the GETPLANE IOCTL.
-        *
-        * Do not call this function directly, use
-        * drm_atomic_plane_get_property() instead.
-        *
-        * This callback is optional if the driver does not support any
-        * driver-private atomic properties.
-        *
-        * RETURNS:
-        *
-        * 0 on success, -EINVAL if the property isn't implemented by the
-        * driver (which should never happen, the core only asks for
-        * properties attached to this plane).
-        */
-       int (*atomic_get_property)(struct drm_plane *plane,
-                                  const struct drm_plane_state *state,
-                                  struct drm_property *property,
-                                  uint64_t *val);
-       /**
-        * @late_register:
-        *
-        * This optional hook can be used to register additional userspace
-        * interfaces attached to the plane like debugfs interfaces.
-        * It is called late in the driver load sequence from drm_dev_register().
-        * Everything added from this callback should be unregistered in
-        * the early_unregister callback.
-        *
-        * Returns:
-        *
-        * 0 on success, or a negative error code on failure.
-        */
-       int (*late_register)(struct drm_plane *plane);
-
-       /**
-        * @early_unregister:
-        *
-        * This optional hook should be used to unregister the additional
-        * userspace interfaces attached to the plane from
-        * late_unregister(). It is called from drm_dev_unregister(),
-        * early in the driver unload sequence to disable userspace access
-        * before data structures are torndown.
-        */
-       void (*early_unregister)(struct drm_plane *plane);
-};
-
-enum drm_plane_type {
-       DRM_PLANE_TYPE_OVERLAY,
-       DRM_PLANE_TYPE_PRIMARY,
-       DRM_PLANE_TYPE_CURSOR,
-};
-
-
-/**
- * struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @crtc: currently bound CRTC
- * @fb: currently bound fb
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- *     drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @state: current atomic state for this plane
- * @zpos_property: zpos property for this plane
- * @helper_private: mid-layer private data
- */
-struct drm_plane {
-       struct drm_device *dev;
-       struct list_head head;
-
-       char *name;
-
-       /**
-        * @mutex:
-        *
-        * Protects modeset plane state, together with the mutex of &drm_crtc
-        * this plane is linked to (when active, getting actived or getting
-        * disabled).
-        */
-       struct drm_modeset_lock mutex;
-
-       struct drm_mode_object base;
-
-       uint32_t possible_crtcs;
-       uint32_t *format_types;
-       unsigned int format_count;
-       bool format_default;
-
-       struct drm_crtc *crtc;
-       struct drm_framebuffer *fb;
-
-       struct drm_framebuffer *old_fb;
-
-       const struct drm_plane_funcs *funcs;
-
-       struct drm_object_properties properties;
-
-       enum drm_plane_type type;
-
-       /**
-        * @index: Position inside the mode_config.list, can be used as an array
-        * index. It is invariant over the lifetime of the plane.
-        */
-       unsigned index;
-
-       const struct drm_plane_helper_funcs *helper_private;
-
-       struct drm_plane_state *state;
-
-       struct drm_property *zpos_property;
-};
-
-/**
- * struct drm_bridge_funcs - drm_bridge control functions
- */
-struct drm_bridge_funcs {
-       /**
-        * @attach:
-        *
-        * This callback is invoked whenever our bridge is being attached to a
-        * &drm_encoder.
-        *
-        * The attach callback is optional.
-        *
-        * RETURNS:
-        *
-        * Zero on success, error code on failure.
-        */
-       int (*attach)(struct drm_bridge *bridge);
-
-       /**
-        * @detach:
-        *
-        * This callback is invoked whenever our bridge is being detached from a
-        * &drm_encoder.
-        *
-        * The detach callback is optional.
-        */
-       void (*detach)(struct drm_bridge *bridge);
-
-       /**
-        * @mode_fixup:
-        *
-        * This callback is used to validate and adjust a mode. The paramater
-        * mode is the display mode that should be fed to the next element in
-        * the display chain, either the final &drm_connector or the next
-        * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
-        * requires. It can be modified by this callback and does not need to
-        * match mode.
-        *
-        * This is the only hook that allows a bridge to reject a modeset. If
-        * this function passes all other callbacks must succeed for this
-        * configuration.
-        *
-        * The mode_fixup callback is optional.
-        *
-        * NOTE:
-        *
-        * This function is called in the check phase of atomic modesets, which
-        * can be aborted for any reason (including on userspace's request to
-        * just check whether a configuration would be possible). Drivers MUST
-        * NOT touch any persistent state (hardware or software) or data
-        * structures except the passed in @state parameter.
-        *
-        * RETURNS:
-        *
-        * True if an acceptable configuration is possible, false if the modeset
-        * operation should be rejected.
-        */
-       bool (*mode_fixup)(struct drm_bridge *bridge,
-                          const struct drm_display_mode *mode,
-                          struct drm_display_mode *adjusted_mode);
-       /**
-        * @disable:
-        *
-        * This callback should disable the bridge. It is called right before
-        * the preceding element in the display pipe is disabled. If the
-        * preceding element is a bridge this means it's called before that
-        * bridge's ->disable() function. If the preceding element is a
-        * &drm_encoder it's called right before the encoder's ->disable(),
-        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The bridge can assume that the display pipe (i.e. clocks and timing
-        * signals) feeding it is still running when this callback is called.
-        *
-        * The disable callback is optional.
-        */
-       void (*disable)(struct drm_bridge *bridge);
-
-       /**
-        * @post_disable:
-        *
-        * This callback should disable the bridge. It is called right after
-        * the preceding element in the display pipe is disabled. If the
-        * preceding element is a bridge this means it's called after that
-        * bridge's ->post_disable() function. If the preceding element is a
-        * &drm_encoder it's called right after the encoder's ->disable(),
-        * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The bridge must assume that the display pipe (i.e. clocks and timing
-        * singals) feeding it is no longer running when this callback is
-        * called.
-        *
-        * The post_disable callback is optional.
-        */
-       void (*post_disable)(struct drm_bridge *bridge);
-
-       /**
-        * @mode_set:
-        *
-        * This callback should set the given mode on the bridge. It is called
-        * after the ->mode_set() callback for the preceding element in the
-        * display pipeline has been called already. The display pipe (i.e.
-        * clocks and timing signals) is off when this function is called.
-        */
-       void (*mode_set)(struct drm_bridge *bridge,
-                        struct drm_display_mode *mode,
-                        struct drm_display_mode *adjusted_mode);
-       /**
-        * @pre_enable:
-        *
-        * This callback should enable the bridge. It is called right before
-        * the preceding element in the display pipe is enabled. If the
-        * preceding element is a bridge this means it's called before that
-        * bridge's ->pre_enable() function. If the preceding element is a
-        * &drm_encoder it's called right before the encoder's ->enable(),
-        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The display pipe (i.e. clocks and timing signals) feeding this bridge
-        * will not yet be running when this callback is called. The bridge must
-        * not enable the display link feeding the next bridge in the chain (if
-        * there is one) when this callback is called.
-        *
-        * The pre_enable callback is optional.
-        */
-       void (*pre_enable)(struct drm_bridge *bridge);
-
-       /**
-        * @enable:
-        *
-        * This callback should enable the bridge. It is called right after
-        * the preceding element in the display pipe is enabled. If the
-        * preceding element is a bridge this means it's called after that
-        * bridge's ->enable() function. If the preceding element is a
-        * &drm_encoder it's called right after the encoder's ->enable(),
-        * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
-        *
-        * The bridge can assume that the display pipe (i.e. clocks and timing
-        * signals) feeding it is running when this callback is called. This
-        * callback must enable the display link feeding the next bridge in the
-        * chain if there is one.
-        *
-        * The enable callback is optional.
-        */
-       void (*enable)(struct drm_bridge *bridge);
-};
-
-/**
- * struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
- */
-struct drm_bridge {
-       struct drm_device *dev;
-       struct drm_encoder *encoder;
-       struct drm_bridge *next;
-#ifdef CONFIG_OF
-       struct device_node *of_node;
-#endif
-       struct list_head list;
-
-       const struct drm_bridge_funcs *funcs;
-       void *driver_private;
-};
-
-/**
- * struct drm_crtc_commit - track modeset commits on a CRTC
- *
- * This structure is used to track pending modeset changes and atomic commit on
- * a per-CRTC basis. Since updating the list should never block this structure
- * is reference counted to allow waiters to safely wait on an event to complete,
- * without holding any locks.
- *
- * It has 3 different events in total to allow a fine-grained synchronization
- * between outstanding updates::
- *
- *     atomic commit thread                    hardware
- *
- *     write new state into hardware   ---->   ...
- *     signal hw_done
- *                                             switch to new state on next
- *     ...                                     v/hblank
- *
- *     wait for buffers to show up             ...
- *
- *     ...                                     send completion irq
- *                                             irq handler signals flip_done
- *     cleanup old buffers
- *
- *     signal cleanup_done
- *
- *     wait for flip_done              <----
- *     clean up atomic state
- *
- * The important bit to know is that cleanup_done is the terminal event, but the
- * ordering between flip_done and hw_done is entirely up to the specific driver
- * and modeset state change.
- *
- * For an implementation of how to use this look at
- * drm_atomic_helper_setup_commit() from the atomic helper library.
- */
-struct drm_crtc_commit {
-       /**
-        * @crtc:
-        *
-        * DRM CRTC for this commit.
-        */
-       struct drm_crtc *crtc;
-
-       /**
-        * @ref:
-        *
-        * Reference count for this structure. Needed to allow blocking on
-        * completions without the risk of the completion disappearing
-        * meanwhile.
-        */
-       struct kref ref;
-
-       /**
-        * @flip_done:
-        *
-        * Will be signaled when the hardware has flipped to the new set of
-        * buffers. Signals at the same time as when the drm event for this
-        * commit is sent to userspace, or when an out-fence is singalled. Note
-        * that for most hardware, in most cases this happens after @hw_done is
-        * signalled.
-        */
-       struct completion flip_done;
-
-       /**
-        * @hw_done:
-        *
-        * Will be signalled when all hw register changes for this commit have
-        * been written out. Especially when disabling a pipe this can be much
-        * later than than @flip_done, since that can signal already when the
-        * screen goes black, whereas to fully shut down a pipe more register
-        * I/O is required.
-        *
-        * Note that this does not need to include separately reference-counted
-        * resources like backing storage buffer pinning, or runtime pm
-        * management.
-        */
-       struct completion hw_done;
-
-       /**
-        * @cleanup_done:
-        *
-        * Will be signalled after old buffers have been cleaned up by calling
-        * drm_atomic_helper_cleanup_planes(). Since this can only happen after
-        * a vblank wait completed it might be a bit later. This completion is
-        * useful to throttle updates and avoid hardware updates getting ahead
-        * of the buffer cleanup too much.
-        */
-       struct completion cleanup_done;
-
-       /**
-        * @commit_entry:
-        *
-        * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
-        */
-       struct list_head commit_entry;
-
-       /**
-        * @event:
-        *
-        * &drm_pending_vblank_event pointer to clean up private events.
-        */
-       struct drm_pending_vblank_event *event;
-};
-
-struct __drm_planes_state {
-       struct drm_plane *ptr;
-       struct drm_plane_state *state;
-};
-
-struct __drm_crtcs_state {
-       struct drm_crtc *ptr;
-       struct drm_crtc_state *state;
-       struct drm_crtc_commit *commit;
-};
-
-struct __drm_connnectors_state {
-       struct drm_connector *ptr;
-       struct drm_connector_state *state;
-};
-
-/**
- * struct drm_atomic_state - the global state object for atomic updates
- * @dev: parent DRM device
- * @allow_modeset: allow full modeset
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
- * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
- * @planes: pointer to array of structures with per-plane data
- * @crtcs: pointer to array of CRTC pointers
- * @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of structures with per-connector data
- * @acquire_ctx: acquire context for this atomic modeset state update
- */
-struct drm_atomic_state {
-       struct drm_device *dev;
-       bool allow_modeset : 1;
-       bool legacy_cursor_update : 1;
-       bool legacy_set_config : 1;
-       struct __drm_planes_state *planes;
-       struct __drm_crtcs_state *crtcs;
-       int num_connector;
-       struct __drm_connnectors_state *connectors;
-
-       struct drm_modeset_acquire_ctx *acquire_ctx;
-
-       /**
-        * @commit_work:
-        *
-        * Work item which can be used by the driver or helpers to execute the
-        * commit without blocking.
-        */
-       struct work_struct commit_work;
-};
-
-
 /**
  * struct drm_mode_set - new values for a CRTC config change
  * @fb: framebuffer to use for new config
@@ -1991,35 +1294,7 @@ struct drm_mode_config {
        struct drm_mode_config_helper_funcs *helper_private;
 };
 
-/**
- * drm_for_each_plane_mask - iterate over planes specified by bitmask
- * @plane: the loop cursor
- * @dev: the DRM device
- * @plane_mask: bitmask of plane indices
- *
- * Iterate over all planes specified by bitmask.
- */
-#define drm_for_each_plane_mask(plane, dev, plane_mask) \
-       list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
-               for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
-
-/**
- * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
- * @encoder: the loop cursor
- * @dev: the DRM device
- * @encoder_mask: bitmask of encoder indices
- *
- * Iterate over all encoders specified by bitmask.
- */
-#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
-       list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
-               for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
-
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
-#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
-#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
-#define obj_to_plane(x) container_of(x, struct drm_plane, base)
 
 extern __printf(6, 7)
 int drm_crtc_init_with_planes(struct drm_device *dev,
@@ -2054,36 +1329,6 @@ static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
        return 1 << drm_crtc_index(crtc);
 }
 
-extern __printf(8, 9)
-int drm_universal_plane_init(struct drm_device *dev,
-                            struct drm_plane *plane,
-                            unsigned long possible_crtcs,
-                            const struct drm_plane_funcs *funcs,
-                            const uint32_t *formats,
-                            unsigned int format_count,
-                            enum drm_plane_type type,
-                            const char *name, ...);
-extern int drm_plane_init(struct drm_device *dev,
-                         struct drm_plane *plane,
-                         unsigned long possible_crtcs,
-                         const struct drm_plane_funcs *funcs,
-                         const uint32_t *formats, unsigned int format_count,
-                         bool is_primary);
-extern void drm_plane_cleanup(struct drm_plane *plane);
-
-/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that plane within a DRM
- * device's list of planes.
- */
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
-{
-       return plane->index;
-}
-extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
-extern void drm_plane_force_disable(struct drm_plane *plane);
 extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
                                   int *hdisplay, int *vdisplay);
 extern int drm_crtc_force_disable(struct drm_crtc *crtc);
@@ -2093,9 +1338,6 @@ extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 
-extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
-                                        int gamma_size);
-
 extern int drm_mode_set_config_internal(struct drm_mode_set *set);
 
 extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
@@ -2105,35 +1347,7 @@ extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
 extern void drm_mode_put_tile_group(struct drm_device *dev,
                                   struct drm_tile_group *tg);
 
-extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
-                                      struct drm_property *property,
-                                      uint64_t value);
-
-extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
-                                                             unsigned int supported_rotations);
-extern unsigned int drm_rotation_simplify(unsigned int rotation,
-                                         unsigned int supported_rotations);
-extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-                                      uint degamma_lut_size,
-                                      bool has_ctm,
-                                      uint gamma_lut_size);
-
-int drm_plane_create_zpos_property(struct drm_plane *plane,
-                                  unsigned int zpos,
-                                  unsigned int min, unsigned int max);
-
-int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
-                                            unsigned int zpos);
-
 /* Helpers */
-static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
-               uint32_t id)
-{
-       struct drm_mode_object *mo;
-       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
-       return mo ? obj_to_plane(mo) : NULL;
-}
-
 static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
        uint32_t id)
 {
@@ -2142,33 +1356,6 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
        return mo ? obj_to_crtc(mo) : NULL;
 }
 
-/*
- * Extract a degamma/gamma LUT value provided by user and round it to the
- * precision supported by the hardware.
- */
-static inline uint32_t drm_color_lut_extract(uint32_t user_input,
-                                            uint32_t bit_precision)
-{
-       uint32_t val = user_input;
-       uint32_t max = 0xffff >> (16 - bit_precision);
-
-       /* Round only if we're not using full precision. */
-       if (bit_precision < 16) {
-               val += 1UL << (16 - bit_precision - 1);
-               val >>= 16 - bit_precision;
-       }
-
-       return clamp_val(val, 0, max);
-}
-
-/* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, dev) \
-       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
-               for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-
-#define drm_for_each_plane(plane, dev) \
-       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
-
 #define drm_for_each_crtc(crtc, dev) \
        list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
@@ -2186,68 +1373,4 @@ assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
                !drm_modeset_is_locked(&mode_config->connection_mutex));
 }
 
-#define drm_for_each_connector(connector, dev) \
-       for (assert_drm_connector_list_read_locked(&(dev)->mode_config),        \
-            connector = list_first_entry(&(dev)->mode_config.connector_list,   \
-                                         struct drm_connector, head);          \
-            &connector->head != (&(dev)->mode_config.connector_list);          \
-            connector = list_next_entry(connector, head))
-
-#define drm_for_each_encoder(encoder, dev) \
-       list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
-
-#define drm_for_each_fb(fb, dev) \
-       for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),            \
-            fb = list_first_entry(&(dev)->mode_config.fb_list, \
-                                         struct drm_framebuffer, head);        \
-            &fb->head != (&(dev)->mode_config.fb_list);                        \
-            fb = list_next_entry(fb, head))
-
-/* drm_edid.c */
-bool drm_probe_ddc(struct i2c_adapter *adapter);
-struct edid *drm_get_edid(struct drm_connector *connector,
-                         struct i2c_adapter *adapter);
-struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
-                                    struct i2c_adapter *adapter);
-struct edid *drm_edid_duplicate(const struct edid *edid);
-int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
-
-u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
-bool drm_rgb_quant_range_selectable(struct edid *edid);
-int drm_add_modes_noedid(struct drm_connector *connector,
-                        int hdisplay, int vdisplay);
-void drm_set_preferred_mode(struct drm_connector *connector,
-                           int hpref, int vpref);
-
-int drm_edid_header_is_valid(const u8 *raw_edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
-                         bool *edid_corrupt);
-bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
-                              int buflen);
-struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
-                                          int hsize, int vsize, int fresh,
-                                          bool rb);
-
-/* drm_bridge.c */
-extern int drm_bridge_add(struct drm_bridge *bridge);
-extern void drm_bridge_remove(struct drm_bridge *bridge);
-extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
-extern void drm_bridge_detach(struct drm_bridge *bridge);
-
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-                       const struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
-
 #endif /* __DRM_CRTC_H__ */
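
As a small illustration of the helpers that remain in drm_crtc.h
(drm_for_each_crtc() and drm_crtc_mask(), both visible above), a sketch that
builds a bitmask covering every CRTC on the device:

static u32 example_all_crtc_mask(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	u32 mask = 0;

	drm_for_each_crtc(crtc, dev)
		mask |= drm_crtc_mask(crtc);

	return mask;
}
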
index 63b8bd5..2a79882 100644 (file)
 # define DP_DS_PORT_TYPE_DVI               2
 # define DP_DS_PORT_TYPE_HDMI              3
 # define DP_DS_PORT_TYPE_NON_EDID          4
+# define DP_DS_PORT_TYPE_DP_DUALMODE        5
+# define DP_DS_PORT_TYPE_WIRELESS           6
 # define DP_DS_PORT_HPD                            (1 << 3)
 /* offset 1 for VGA is maximum megapixels per second / 8 */
 /* offset 2 */
-# define DP_DS_VGA_MAX_BPC_MASK                    (3 << 0)
-# define DP_DS_VGA_8BPC                            0
-# define DP_DS_VGA_10BPC                   1
-# define DP_DS_VGA_12BPC                   2
-# define DP_DS_VGA_16BPC                   3
+# define DP_DS_MAX_BPC_MASK                (3 << 0)
+# define DP_DS_8BPC                        0
+# define DP_DS_10BPC                       1
+# define DP_DS_12BPC                       2
+# define DP_DS_16BPC                       3
 
 /* link configuration */
 #define        DP_LINK_BW_SET                      0x100
 #define DP_SOURCE_OUI                      0x300
 #define DP_SINK_OUI                        0x400
 #define DP_BRANCH_OUI                      0x500
+#define DP_BRANCH_ID                        0x503
+#define DP_BRANCH_HW_REV                    0x509
+#define DP_BRANCH_SW_REV                    0x50A
 
 #define DP_SET_POWER                        0x600
 # define DP_SET_POWER_D0                    0x1
@@ -813,6 +818,13 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                               const u8 port_cap[4]);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                             const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+                            const u8 port_cap[4], struct drm_dp_aux *aux);
 
 void drm_dp_aux_init(struct drm_dp_aux *aux);
 int drm_dp_aux_register(struct drm_dp_aux *aux);
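
A hypothetical consumer of the new downstream-port helpers; the dpcd and
port_cap buffers are assumed to come from earlier DPCD reads, and the
return-value conventions are hedged in the comments:

static void example_log_downstream(struct drm_dp_aux *aux,
				   const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				   const u8 port_cap[4])
{
	int max_clock = drm_dp_downstream_max_clock(dpcd, port_cap);
	int max_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
	char id[6];

	/* Assumed: a positive return means the 6-byte branch ID was read. */
	if (drm_dp_downstream_id(aux, id) > 0)
		DRM_DEBUG_KMS("branch id: %.6s\n", id);

	/* Assumed: 0 from either helper means no limit was reported. */
	DRM_DEBUG_KMS("downstream max clock %d, max bpc %d\n",
		      max_clock, max_bpc);
}
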
index 919933d..c3a7d44 100644 (file)
@@ -25,6 +25,9 @@
 
 #include <linux/types.h>
 
+struct drm_device;
+struct i2c_adapter;
+
 #define EDID_LENGTH 128
 #define DDC_ADDR 0x50
 #define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
@@ -423,9 +426,36 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
        return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
 }
 
+bool drm_probe_ddc(struct i2c_adapter *adapter);
 struct edid *drm_do_get_edid(struct drm_connector *connector,
        int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
                              size_t len),
        void *data);
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter);
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+                                    struct i2c_adapter *adapter);
+struct edid *drm_edid_duplicate(const struct edid *edid);
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+bool drm_detect_hdmi_monitor(struct edid *edid);
+bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_rgb_quant_range_selectable(struct edid *edid);
+int drm_add_modes_noedid(struct drm_connector *connector,
+                        int hdisplay, int vdisplay);
+void drm_set_preferred_mode(struct drm_connector *connector,
+                           int hpref, int vpref);
+
+int drm_edid_header_is_valid(const u8 *raw_edid);
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+                         bool *edid_corrupt);
+bool drm_edid_is_valid(struct edid *edid);
+void drm_edid_get_monitor_name(struct edid *edid, char *name,
+                              int buflen);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+                                          int hsize, int vsize, int fresh,
+                                          bool rb);
 
 #endif /* __DRM_EDID_H__ */
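
The declarations consolidated here cover the usual connector .get_modes()
flow; a minimal sketch, assuming the driver passes in its DDC adapter:

static int example_get_modes(struct drm_connector *connector,
			     struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);
	int count;

	/* Fall back to standard modes when no EDID can be read. */
	if (!edid)
		return drm_add_modes_noedid(connector, 1024, 768);

	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}
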
index fce0203..387e33a 100644 (file)
@@ -224,4 +224,26 @@ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
 
 void drm_encoder_cleanup(struct drm_encoder *encoder);
 
+/**
+ * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ * @encoder_mask: bitmask of encoder indices
+ *
+ * Iterate over all encoders specified by bitmask.
+ */
+#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
+       list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
+               for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+
+/**
+ * drm_for_each_encoder - iterate over all encoders
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all encoders of @dev.
+ */
+#define drm_for_each_encoder(encoder, dev) \
+       list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
 #endif
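
An illustrative use of the mask iterator moved here, logging each encoder
named in a caller-provided bitmask (where the mask comes from is an
assumption):

static void example_log_encoders(struct drm_device *dev, u32 encoder_mask)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, dev, encoder_mask)
		DRM_DEBUG_KMS("encoder %s selected by mask\n", encoder->name);
}
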
index 797fb5f..e19458d 100644 (file)
@@ -287,11 +287,6 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
                                       struct drm_connector *connector);
 #else
-static inline int drm_fb_helper_modinit(void)
-{
-       return 0;
-}
-
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
                                        struct drm_fb_helper *helper,
                                        const struct drm_fb_helper_funcs *funcs)
index b2554c5..f5ae1f4 100644 (file)
@@ -206,6 +206,8 @@ struct drm_framebuffer {
        struct list_head filp_head;
 };
 
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+
 int drm_framebuffer_init(struct drm_device *dev,
                         struct drm_framebuffer *fb,
                         const struct drm_framebuffer_funcs *funcs);
@@ -247,4 +249,19 @@ static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
 {
        return atomic_read(&fb->base.refcount.refcount);
 }
+
+/**
+ * drm_for_each_fb - iterate over all framebuffers
+ * @fb: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all framebuffers of @dev. The caller must hold the fb_lock
+ * from &drm_mode_config.
+ */
+#define drm_for_each_fb(fb, dev) \
+       for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),            \
+            fb = list_first_entry(&(dev)->mode_config.fb_list, \
+                                         struct drm_framebuffer, head);        \
+            &fb->head != (&(dev)->mode_config.fb_list);                        \
+            fb = list_next_entry(fb, head))
 #endif
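
A hedged sketch of drm_for_each_fb(), taking fb_lock first so the iterator's
WARN_ON stays quiet:

static unsigned int example_count_fbs(struct drm_device *dev)
{
	struct drm_framebuffer *fb;
	unsigned int count = 0;

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(fb, dev)
		count++;
	mutex_unlock(&dev->mode_config.fb_lock);

	return count;
}
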
index be3d938..43460b2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kref.h>
 struct drm_object_properties;
 struct drm_property;
+struct drm_device;
 
 /**
  * struct drm_mode_object - base structure for modeset objects
index 011f199..9934d91 100644 (file)
 #ifndef __DRM_MODES_H__
 #define __DRM_MODES_H__
 
+#include <linux/hdmi.h>
+
 #include <drm/drm_mode_object.h>
 #include <drm/drm_connector.h>
 
+struct videomode;
+
 /*
  * Note on terminology:  here, for brevity and convenience, we refer to connector
  * control chips as 'CRTCs'.  They can control any type of connector, VGA, LVDS,
@@ -403,6 +407,8 @@ struct drm_display_mode {
        enum hdmi_picture_aspect picture_aspect_ratio;
 };
 
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
+
 /**
  * drm_mode_is_stereo - check for stereo mode flags
  * @mode: drm_display_mode to check
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
new file mode 100644 (file)
index 0000000..43cf193
--- /dev/null
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PLANE_H__
+#define __DRM_PLANE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_crtc;
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ *     plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ *     plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @rotation: rotation of the plane
+ * @zpos: priority of the given plane on crtc (optional)
+ * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
+ *     where N is the number of active planes for given crtc
+ * @src: clipped source coordinates of the plane (in 16.16)
+ * @dst: clipped destination coordinates of the plane
+ * @visible: visibility of the plane
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+       struct drm_plane *plane;
+
+       struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+       struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
+       struct fence *fence;
+
+       /* Signed dest location allows it to be partially off screen */
+       int32_t crtc_x, crtc_y;
+       uint32_t crtc_w, crtc_h;
+
+       /* Source values are 16.16 fixed point */
+       uint32_t src_x, src_y;
+       uint32_t src_h, src_w;
+
+       /* Plane rotation */
+       unsigned int rotation;
+
+       /* Plane zpos */
+       unsigned int zpos;
+       unsigned int normalized_zpos;
+
+       /* Clipped coordinates */
+       struct drm_rect src, dst;
+
+       /*
+        * Is the plane actually visible? Can be false even
+        * if fb!=NULL and crtc!=NULL, due to clipping.
+        */
+       bool visible;
+
+       struct drm_atomic_state *state;
+};
+
+
+/**
+ * struct drm_plane_funcs - driver plane control functions
+ */
+struct drm_plane_funcs {
+       /**
+        * @update_plane:
+        *
+        * This is the legacy entry point to enable and configure the plane for
+        * the given CRTC and framebuffer. It is never called to disable the
+        * plane, i.e. the passed-in crtc and fb parameters are never NULL.
+        *
+        * The source rectangle in frame buffer memory coordinates is given by
+        * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
+        * values). Devices that don't support subpixel plane coordinates can
+        * ignore the fractional part.
+        *
+        * The destination rectangle in CRTC coordinates is given by the
+        * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
+        * Devices scale the source rectangle to the destination rectangle. If
+        * scaling is not supported, and the source rectangle size doesn't match
+        * the destination rectangle size, the driver must return a
+        * -EINVAL error.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_update_plane() to implement this hook.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*update_plane)(struct drm_plane *plane,
+                           struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                           int crtc_x, int crtc_y,
+                           unsigned int crtc_w, unsigned int crtc_h,
+                           uint32_t src_x, uint32_t src_y,
+                           uint32_t src_w, uint32_t src_h);
+
+       /**
+        * @disable_plane:
+        *
+        * This is the legacy entry point to disable the plane. The DRM core
+        * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
+        * with the frame buffer ID set to 0.  Disabled planes must not be
+        * processed by the CRTC.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_disable_plane() to implement this hook.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*disable_plane)(struct drm_plane *plane);
+
+       /**
+        * @destroy:
+        *
+        * Clean up plane resources. This is only called at driver unload time
+        * through drm_mode_config_cleanup() since a plane cannot be hotplugged
+        * in DRM.
+        */
+       void (*destroy)(struct drm_plane *plane);
+
+       /**
+        * @reset:
+        *
+        * Reset plane hardware and software state to off. This function isn't
+        * called by the core directly, only through drm_mode_config_reset().
+        * For historical reasons only, it is not a helper hook.
+        *
+        * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
+        * atomic state using this hook.
+        */
+       void (*reset)(struct drm_plane *plane);
+
+       /**
+        * @set_property:
+        *
+        * This is the legacy entry point to update a property attached to the
+        * plane.
+        *
+        * Drivers implementing atomic modeset should use
+        * drm_atomic_helper_plane_set_property() to implement this hook.
+        *
+        * This callback is optional if the driver does not support any legacy
+        * driver-private properties.
+        *
+        * RETURNS:
+        *
+        * 0 on success or a negative error code on failure.
+        */
+       int (*set_property)(struct drm_plane *plane,
+                           struct drm_property *property, uint64_t val);
+
+       /**
+        * @atomic_duplicate_state:
+        *
+        * Duplicate the current atomic state for this plane and return it.
+        * The core and helpers guarantee that any atomic state duplicated with
+        * this hook and still owned by the caller (i.e. not transferred to the
+        * driver by calling ->atomic_commit() from struct
+        * &drm_mode_config_funcs) will be cleaned up by calling the
+        * @atomic_destroy_state hook in this structure.
+        *
+        * Atomic drivers which don't subclass struct &drm_plane_state should use
+        * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
+        * state structure to extend it with driver-private state should use
+        * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
+        * duplicated in a consistent fashion across drivers.
+        *
+        * It is an error to call this hook before plane->state has been
+        * initialized correctly.
+        *
+        * NOTE:
+        *
+        * If the duplicate state references refcounted resources this hook must
+        * acquire a reference for each of them. The driver must release these
+        * references again in @atomic_destroy_state.
+        *
+        * RETURNS:
+        *
+        * Duplicated atomic state or NULL when the allocation failed.
+        */
+       struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+
+       /**
+        * @atomic_destroy_state:
+        *
+        * Destroy a state duplicated with @atomic_duplicate_state and release
+        * or unreference all resources it references.
+        */
+       void (*atomic_destroy_state)(struct drm_plane *plane,
+                                    struct drm_plane_state *state);
+
+       /**
+        * @atomic_set_property:
+        *
+        * Decode a driver-private property value and store the decoded value
+        * into the passed-in state structure. Since the atomic core decodes all
+        * standardized properties (even for extensions beyond the core set of
+        * properties which might not be implemented by all drivers), this
+        * requires drivers to subclass the state structure.
+        *
+        * Such driver-private properties should really only be implemented for
+        * truly hardware/vendor-specific state. Instead it is preferred to
+        * standardize the atomic extension and decode the properties used to expose
+        * such an extension in the core.
+        *
+        * Do not call this function directly, use
+        * drm_atomic_plane_set_property() instead.
+        *
+        * This callback is optional if the driver does not support any
+        * driver-private atomic properties.
+        *
+        * NOTE:
+        *
+        * This function is called in the state assembly phase of atomic
+        * modesets, which can be aborted for any reason (including on
+        * userspace's request to just check whether a configuration would be
+        * possible). Drivers MUST NOT touch any persistent state (hardware or
+        * software) or data structures except the passed in @state parameter.
+        *
+        * Also, since userspace controls the order in which properties are set, this
+        * function must not do any input validation (since the state update is
+        * incomplete and hence likely inconsistent). Instead any such input
+        * validation must be done in the various atomic_check callbacks.
+        *
+        * RETURNS:
+        *
+        * 0 if the property has been found, -EINVAL if the property isn't
+        * implemented by the driver (which shouldn't ever happen, the core only
+        * asks for properties attached to this plane). No other validation is
+        * allowed by the driver. The core already checks that the property
+        * value is within the range (integer, valid enum value, ...) the driver
+        * set when registering the property.
+        */
+       int (*atomic_set_property)(struct drm_plane *plane,
+                                  struct drm_plane_state *state,
+                                  struct drm_property *property,
+                                  uint64_t val);
+
+       /**
+        * @atomic_get_property:
+        *
+        * Reads out the decoded driver-private property. This is used to
+        * implement the GETPLANE IOCTL.
+        *
+        * Do not call this function directly, use
+        * drm_atomic_plane_get_property() instead.
+        *
+        * This callback is optional if the driver does not support any
+        * driver-private atomic properties.
+        *
+        * RETURNS:
+        *
+        * 0 on success, -EINVAL if the property isn't implemented by the
+        * driver (which should never happen, the core only asks for
+        * properties attached to this plane).
+        */
+       int (*atomic_get_property)(struct drm_plane *plane,
+                                  const struct drm_plane_state *state,
+                                  struct drm_property *property,
+                                  uint64_t *val);
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the plane like debugfs interfaces.
+        * It is called late in the driver load sequence from drm_dev_register().
+        * Everything added from this callback should be unregistered in
+        * the @early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_plane *plane);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the plane from
+        * @late_register. It is called from drm_dev_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_plane *plane);
+};
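
For an atomic driver most of these hooks can be wired straight to the atomic
helpers named in the comments above; a minimal sketch (the variable name is an
assumption, the helper functions are the standard atomic helpers):

	static const struct drm_plane_funcs sketch_plane_funcs = {
		.update_plane		= drm_atomic_helper_update_plane,
		.disable_plane		= drm_atomic_helper_disable_plane,
		.destroy		= drm_plane_cleanup,
		.reset			= drm_atomic_helper_plane_reset,
		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	};

Drivers with private properties additionally implement @atomic_set_property
and @atomic_get_property against a subclassed state. A hypothetical sketch
(the "watermark" property, the cast helpers and the state struct are all
invented for illustration):

	static int sketch_plane_atomic_set_property(struct drm_plane *plane,
						    struct drm_plane_state *state,
						    struct drm_property *property,
						    uint64_t val)
	{
		struct sketch_plane *p = to_sketch_plane(plane);
		struct sketch_plane_state *s = to_sketch_plane_state(state);

		if (property == p->watermark_property) {
			/* just decode and store; validation belongs in atomic_check */
			s->watermark = val;
			return 0;
		}
		return -EINVAL;
	}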
+
+/**
+ * enum drm_plane_type - uapi plane type enumeration
+ *
+ * For historical reasons not all planes are made the same. This enumeration is
+ * used to tell the different types of planes apart to implement the different
+ * uapi semantics for them. For userspace which is universal plane aware and
+ * which is using the atomic IOCTL there's no difference between these planes
+ * (beyond what the driver and hardware can support, of course).
+ *
+ * For compatibility with legacy userspace, only overlay planes are made
+ * available to userspace by default. Userspace clients may set the
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * wish to receive a universal plane list containing all plane types. See also
+ * drm_for_each_legacy_plane().
+ *
+ * WARNING: The values of this enum are UABI since they're exposed in the "type"
+ * property.
+ */
+enum drm_plane_type {
+       /**
+        * @DRM_PLANE_TYPE_OVERLAY:
+        *
+        * Overlay planes represent all non-primary, non-cursor planes. Some
+        * drivers refer to these types of planes as "sprites" internally.
+        */
+       DRM_PLANE_TYPE_OVERLAY,
+
+       /**
+        * @DRM_PLANE_TYPE_PRIMARY:
+        *
+        * Primary planes represent a "main" plane for a CRTC.  Primary planes
+        * are the planes operated upon by CRTC modesetting and flipping
+        * operations described in the page_flip and set_config hooks in struct
+        * &drm_crtc_funcs.
+        */
+       DRM_PLANE_TYPE_PRIMARY,
+
+       /**
+        * @DRM_PLANE_TYPE_CURSOR:
+        *
+        * Cursor planes represent a "cursor" plane for a CRTC.  Cursor planes
+        * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
+        * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+        */
+       DRM_PLANE_TYPE_CURSOR,
+};
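
Userspace has to opt in to see all three plane types; a libdrm-based sketch
(fd is assumed to be an open DRM device file descriptor, using xf86drm.h and
xf86drmMode.h):

	/* without the cap, only DRM_PLANE_TYPE_OVERLAY planes are listed */
	drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);

	drmModePlaneRes *res = drmModeGetPlaneResources(fd);
	/* res->planes now also contains primary and cursor planes */
	drmModeFreePlaneResources(res);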
+
+
+/**
+ * struct drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @name: human readable name, can be overwritten by the driver
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ *     drm_mode_set_config_internal() to implement correct refcounting.
+ * @funcs: helper functions
+ * @properties: property tracking for this plane
+ * @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
+ * @zpos_property: zpos property for this plane
+ * @helper_private: mid-layer private data
+ */
+struct drm_plane {
+       struct drm_device *dev;
+       struct list_head head;
+
+       char *name;
+
+       /**
+        * @mutex:
+        *
+        * Protects modeset plane state, together with the mutex of &drm_crtc
+        * this plane is linked to (when active, getting activated or getting
+        * disabled).
+        */
+       struct drm_modeset_lock mutex;
+
+       struct drm_mode_object base;
+
+       uint32_t possible_crtcs;
+       uint32_t *format_types;
+       unsigned int format_count;
+       bool format_default;
+
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+
+       struct drm_framebuffer *old_fb;
+
+       const struct drm_plane_funcs *funcs;
+
+       struct drm_object_properties properties;
+
+       enum drm_plane_type type;
+
+       /**
+        * @index: Position inside the mode_config.plane_list, can be used as
+        * an array index. It is invariant over the lifetime of the plane.
+        */
+       unsigned index;
+
+       const struct drm_plane_helper_funcs *helper_private;
+
+       struct drm_plane_state *state;
+
+       struct drm_property *zpos_property;
+};
+
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+extern __printf(8, 9)
+int drm_universal_plane_init(struct drm_device *dev,
+                            struct drm_plane *plane,
+                            unsigned long possible_crtcs,
+                            const struct drm_plane_funcs *funcs,
+                            const uint32_t *formats,
+                            unsigned int format_count,
+                            enum drm_plane_type type,
+                            const char *name, ...);
+extern int drm_plane_init(struct drm_device *dev,
+                         struct drm_plane *plane,
+                         unsigned long possible_crtcs,
+                         const struct drm_plane_funcs *funcs,
+                         const uint32_t *formats, unsigned int format_count,
+                         bool is_primary);
+extern void drm_plane_cleanup(struct drm_plane *plane);
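
A usage sketch of the init path, reusing the funcs table sketched earlier;
the containing structure, crtc mask and format list are driver-side
assumptions:

	static const uint32_t sketch_formats[] = { DRM_FORMAT_XRGB8888 };

	ret = drm_universal_plane_init(dev, &priv->primary, BIT(crtc_index),
				       &sketch_plane_funcs,
				       sketch_formats, ARRAY_SIZE(sketch_formats),
				       DRM_PLANE_TYPE_PRIMARY, "primary-%d", 0);
	if (ret)
		return ret;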
+
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+static inline unsigned int drm_plane_index(struct drm_plane *plane)
+{
+       return plane->index;
+}
+extern struct drm_plane *drm_plane_from_index(struct drm_device *dev, int idx);
+extern void drm_plane_force_disable(struct drm_plane *plane);
+
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+                                      struct drm_property *property,
+                                      uint64_t value);
+
+/**
+ * drm_plane_find - find a &drm_plane
+ * @dev: DRM device
+ * @id: plane id
+ *
+ * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
+               uint32_t id)
+{
+       struct drm_mode_object *mo;
+       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
+       return mo ? obj_to_plane(mo) : NULL;
+}
+
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+       list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+               for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+
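A usage sketch for the mask iterator; crtc_state is assumed to be a
struct drm_crtc_state * whose plane_mask marks the planes taking part in an
update:

	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
		/* visits every plane whose index bit is set in plane_mask */
		DRM_DEBUG_KMS("plane [%s] is part of this update\n", plane->name);
	}
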
+/**
+ * drm_for_each_legacy_plane - iterate over all planes for legacy userspace
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all legacy planes of @dev, excluding primary and cursor planes.
+ * This is useful for implementing userspace APIs when userspace is not
+ * universal plane aware. See also enum &drm_plane_type.
+ */
+#define drm_for_each_legacy_plane(plane, dev) \
+       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
+               for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+
+/**
+ * drm_for_each_plane - iterate over all planes
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all planes of @dev, including primary and cursor planes.
+ */
+#define drm_for_each_plane(plane, dev) \
+       list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+
+#endif
index 30ab289..43c4b6a 100644 (file)
@@ -219,6 +219,7 @@ struct drm_prop_enum_list {
 };
 
 #define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
 
 /**
  * drm_property_type_is - check the type of a property
index afba6fc..9c03895 100644 (file)
  */
 
 #include <drm/drm_mm.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+struct drm_file;
+
 struct drm_vma_offset_file {
        struct rb_node vm_rb;
-       struct file *vm_filp;
+       struct drm_file *vm_tag;
        unsigned long vm_count;
 };
 
@@ -60,10 +61,11 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                           struct drm_vma_offset_node *node);
 
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+                        struct drm_file *tag);
 bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
-                            struct file *filp);
+                            struct drm_file *tag);
 
 /**
  * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
@@ -214,9 +216,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
 /**
  * drm_vma_node_verify_access() - Access verification helper for TTM
  * @node: Offset node
- * @filp: Open-file
+ * @tag: Tag of file to check
  *
- * This checks whether @filp is granted access to @node. It is the same as
+ * This checks whether @tag is granted access to @node. It is the same as
  * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
  * verify_access() callbacks.
  *
@@ -224,9 +226,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
  * 0 if access is granted, -EACCES otherwise.
  */
 static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
-                                            struct file *filp)
+                                            struct drm_file *tag)
 {
-       return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+       return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
 }
 
 #endif /* __DRM_VMA_MANAGER_H__ */
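
A sketch of the driver side after this filp -> drm_file change, modeled on a
TTM verify_access() callback; struct sketch_bo (a ttm_buffer_object wrapper)
is an assumption, and the drm_file is recovered from file->private_data,
where DRM file descriptors store it:

	static int sketch_bo_verify_access(struct ttm_buffer_object *bo,
					   struct file *filp)
	{
		struct sketch_bo *sbo = container_of(bo, struct sketch_bo, bo);

		/* the tag is now the drm_file, not the struct file itself */
		return drm_vma_node_verify_access(&sbo->bo.vma_node,
						  filp->private_data);
	}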
index 33466bf..0d5f426 100644 (file)
 #define INTEL_IVB_Q_IDS(info) \
        INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
 
-#define INTEL_HSW_D_IDS(info) \
+#define INTEL_HSW_IDS(info) \
        INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
        INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
        INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
        INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
        INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
        INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
-       INTEL_VGA_DEVICE(0x0D2E, info)  /* CRW GT3 reserved */ \
-
-#define INTEL_HSW_M_IDS(info) \
+       INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
        INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
        INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
        INTEL_VGA_DEVICE(0x0D26, info)  /* CRW GT3 mobile */
 
-#define INTEL_VLV_M_IDS(info) \
+#define INTEL_VLV_IDS(info) \
        INTEL_VGA_DEVICE(0x0f30, info), \
        INTEL_VGA_DEVICE(0x0f31, info), \
        INTEL_VGA_DEVICE(0x0f32, info), \
        INTEL_VGA_DEVICE(0x0f33, info), \
-       INTEL_VGA_DEVICE(0x0157, info)
-
-#define INTEL_VLV_D_IDS(info) \
+       INTEL_VGA_DEVICE(0x0157, info), \
        INTEL_VGA_DEVICE(0x0155, info)
 
-#define INTEL_BDW_GT12M_IDS(info)  \
+#define INTEL_BDW_GT12_IDS(info)  \
        INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
        INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
        INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
        INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
        INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
        INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
-       INTEL_VGA_DEVICE(0x161E, info)  /* GT2 ULX */
-
-#define INTEL_BDW_GT12D_IDS(info) \
+       INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
        INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
        INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
        INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
        INTEL_VGA_DEVICE(0x161D, info)  /* GT2 Workstation */
 
-#define INTEL_BDW_GT3M_IDS(info) \
+#define INTEL_BDW_GT3_IDS(info) \
        INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
        INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
        INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
-       INTEL_VGA_DEVICE(0x162E, info)  /* ULX */
-
-#define INTEL_BDW_GT3D_IDS(info) \
+       INTEL_VGA_DEVICE(0x162E, info), /* ULX */ \
        INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
        INTEL_VGA_DEVICE(0x162D, info)  /* Workstation */
 
        INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
        INTEL_VGA_DEVICE(0x163D, info)  /* Workstation */
 
-#define INTEL_BDW_M_IDS(info) \
-       INTEL_BDW_GT12M_IDS(info), \
-       INTEL_BDW_GT3M_IDS(info), \
-       INTEL_BDW_RSVDM_IDS(info)
-
-#define INTEL_BDW_D_IDS(info) \
-       INTEL_BDW_GT12D_IDS(info), \
-       INTEL_BDW_GT3D_IDS(info), \
+#define INTEL_BDW_IDS(info) \
+       INTEL_BDW_GT12_IDS(info), \
+       INTEL_BDW_GT3_IDS(info), \
+       INTEL_BDW_RSVDM_IDS(info), \
+       INTEL_BDW_RSVDD_IDS(info)
 
 #define INTEL_CHV_IDS(info) \
index 6f2c598..9eb940d 100644 (file)
@@ -45,37 +45,7 @@ struct ttm_bo_device;
 
 struct drm_mm_node;
 
-/**
- * struct ttm_place
- *
- * @fpfn:      first valid page frame number to put the object
- * @lpfn:      last valid page frame number to put the object
- * @flags:     memory domain and caching flags for the object
- *
- * Structure indicating a possible place to put an object.
- */
-struct ttm_place {
-       unsigned        fpfn;
-       unsigned        lpfn;
-       uint32_t        flags;
-};
-
-/**
- * struct ttm_placement
- *
- * @num_placement:     number of preferred placements
- * @placement:         preferred placements
- * @num_busy_placement:        number of preferred placements when need to evict buffer
- * @busy_placement:    preferred placements when need to evict buffer
- *
- * Structure indicating the placement you request for an object.
- */
-struct ttm_placement {
-       unsigned                num_placement;
-       const struct ttm_place  *placement;
-       unsigned                num_busy_placement;
-       const struct ttm_place  *busy_placement;
-};
+struct ttm_placement;
 
 /**
  * struct ttm_bus_placement
index c986fa7..4f0a921 100644 (file)
@@ -133,7 +133,6 @@ struct ttm_tt {
  * struct ttm_dma_tt
  *
  * @ttm: Base ttm_tt struct.
- * @cpu_address: The CPU address of the pages
  * @dma_address: The DMA (bus) addresses of the pages
  * @pages_list: used by some page allocation backend
  *
@@ -143,7 +142,6 @@ struct ttm_tt {
  */
 struct ttm_dma_tt {
        struct ttm_tt ttm;
-       void **cpu_address;
        dma_addr_t *dma_address;
        struct list_head pages_list;
 };
index 8ed44f9..932be0c 100644 (file)
@@ -30,6 +30,9 @@
 
 #ifndef _TTM_PLACEMENT_H_
 #define _TTM_PLACEMENT_H_
+
+#include <linux/types.h>
+
 /*
  * Memory regions for data placement.
  */
 #define TTM_PL_SYSTEM           0
 #define TTM_PL_TT               1
 #define TTM_PL_VRAM             2
-#define TTM_PL_PRIV0            3
-#define TTM_PL_PRIV1            4
-#define TTM_PL_PRIV2            5
-#define TTM_PL_PRIV3            6
-#define TTM_PL_PRIV4            7
-#define TTM_PL_PRIV5            8
-#define TTM_PL_SWAPPED          15
+#define TTM_PL_PRIV             3
 
 #define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
 #define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
 #define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
-#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
-#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
-#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
-#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
-#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
-#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
+#define TTM_PL_FLAG_PRIV        (1 << TTM_PL_PRIV)
 #define TTM_PL_MASK_MEM         0x0000FFFF
 
 /*
@@ -72,7 +63,6 @@
 #define TTM_PL_FLAG_CACHED      (1 << 16)
 #define TTM_PL_FLAG_UNCACHED    (1 << 17)
 #define TTM_PL_FLAG_WC          (1 << 18)
-#define TTM_PL_FLAG_SHARED      (1 << 20)
 #define TTM_PL_FLAG_NO_EVICT    (1 << 21)
 #define TTM_PL_FLAG_TOPDOWN     (1 << 22)
 
 
 #define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
 
-/*
- * Access flags to be used for CPU- and GPU- mappings.
- * The idea is that the TTM synchronization mechanism will
- * allow concurrent READ access and exclusive write access.
- * Currently GPU- and CPU accesses are exclusive.
+/**
+ * struct ttm_place
+ *
+ * @fpfn:      first valid page frame number to put the object
+ * @lpfn:      last valid page frame number to put the object
+ * @flags:     memory domain and caching flags for the object
+ *
+ * Structure indicating a possible place to put an object.
  */
+struct ttm_place {
+       unsigned        fpfn;
+       unsigned        lpfn;
+       uint32_t        flags;
+};
 
-#define TTM_ACCESS_READ         (1 << 0)
-#define TTM_ACCESS_WRITE        (1 << 1)
+/**
+ * struct ttm_placement
+ *
+ * @num_placement:     number of preferred placements
+ * @placement:         preferred placements
+ * @num_busy_placement:        number of preferred placements when the buffer must be evicted
+ * @busy_placement:    preferred placements when the buffer must be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+       unsigned                num_placement;
+       const struct ttm_place  *placement;
+       unsigned                num_busy_placement;
+       const struct ttm_place  *busy_placement;
+};
 
 #endif
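
A sketch of how a driver fills in the relocated structs; one VRAM placement
doubling as the busy-eviction fallback (the flag combination is illustrative):

	static const struct ttm_place sketch_vram_place = {
		.fpfn	= 0,	/* no lower page-frame bound */
		.lpfn	= 0,	/* 0 means no upper bound */
		.flags	= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
	};

	static const struct ttm_placement sketch_vram_placement = {
		.num_placement		= 1,
		.placement		= &sketch_vram_place,
		.num_busy_placement	= 1,
		.busy_placement		= &sketch_vram_place,
	};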
index ae2845f..d6b5a21 100644 (file)
@@ -649,6 +649,7 @@ struct drm_amdgpu_info_hw_ip {
  * Supported GPU families
  */
 #define AMDGPU_FAMILY_UNKNOWN                  0
+#define AMDGPU_FAMILY_SI                       110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
 #define AMDGPU_FAMILY_CI                       120 /* Bonaire, Hawaii */
 #define AMDGPU_FAMILY_KV                       125 /* Kaveri, Kabini, Mullins */
 #define AMDGPU_FAMILY_VI                       130 /* Iceland, Tonga */
index 5501fe8..03725fe 100644 (file)
@@ -387,6 +387,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_SOFTPIN     37
 #define I915_PARAM_HAS_POOLED_EU        38
 #define I915_PARAM_MIN_EU_IN_POOL       39
+#define I915_PARAM_MMAP_GTT_VERSION     40
 
 typedef struct drm_i915_getparam {
        __s32 param;
index 413303d..5b287d6 100644 (file)
@@ -85,15 +85,12 @@ struct sync_file_info {
 #define SYNC_IOC_MERGE         _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
 
 /**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ * DOC: SYNC_IOC_FILE_INFO - get detailed information on a sync_file
  *
- * Takes a struct sync_file_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len.  On return, len is
- * updated to reflect the total size of the sync_file_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ * Takes a struct sync_file_info. If num_fences is 0, the field is updated
+ * with the actual number of fences. If num_fences is > 0, the system will
+ * use the pointer provided in sync_fence_info to return up to num_fences of
+ * struct sync_fence_info, with detailed fence information.
  */
 #define SYNC_IOC_FILE_INFO     _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
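
A userspace sketch of the two-step pattern described above; error handling is
trimmed, and fd is assumed to be a sync_file descriptor:

	struct sync_file_info info = {};

	/* first call: num_fences == 0, kernel just fills in the count */
	ioctl(fd, SYNC_IOC_FILE_INFO, &info);

	struct sync_fence_info *fences =
		calloc(info.num_fences, sizeof(*fences));
	info.sync_fence_info = (__u64)(uintptr_t)fences;

	/* second call: kernel copies up to num_fences entries into the buffer */
	ioctl(fd, SYNC_IOC_FILE_INFO, &info);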
 
diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h
new file mode 100644 (file)
index 0000000..7b87efc
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * i.MX Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef __IMX_IPU_IMAGE_CONVERT_H__
+#define __IMX_IPU_IMAGE_CONVERT_H__
+
+#include <video/imx-ipu-v3.h>
+
+struct ipu_image_convert_ctx;
+
+/**
+ * struct ipu_image_convert_run - image conversion run request struct
+ *
+ * @ctx:       the conversion context
+ * @in_phys:   dma addr of input image buffer for this run
+ * @out_phys:  dma addr of output image buffer for this run
+ * @status:    completion status of this run
+ */
+struct ipu_image_convert_run {
+       struct ipu_image_convert_ctx *ctx;
+
+       dma_addr_t in_phys;
+       dma_addr_t out_phys;
+
+       int status;
+
+       /* internal to the image converter, callers must not touch */
+       struct list_head list;
+};
+
+/**
+ * ipu_image_convert_cb_t - conversion callback function prototype
+ *
+ * @run:       the completed conversion run pointer
+ * @ctx:       a private context pointer for the callback
+ */
+typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run,
+                                      void *ctx);
+
+/**
+ * ipu_image_convert_enum_format() - enumerate the image converter's
+ *     supported input and output pixel formats.
+ *
+ * @index:     pixel format index
+ * @fourcc:    v4l2 fourcc for this index
+ *
+ * Returns 0 with a valid index and fills in the v4l2 fourcc, -EINVAL otherwise.
+ *
+ * In V4L2, drivers can call ipu_image_convert_enum_format() in .enum_fmt.
+ */
+int ipu_image_convert_enum_format(int index, u32 *fourcc);
+
+/**
+ * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions.
+ *
+ * @in:                input image format, adjusted on return
+ * @out:       output image format, adjusted on return
+ * @rot_mode:  rotation mode
+ *
+ * In V4L2, drivers can call ipu_image_convert_adjust() in .try_fmt.
+ */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+                             enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_verify() - verify that input/output image formats
+ *         and rotation mode meet IPU restrictions.
+ *
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ *
+ * Returns 0 if the formats and rotation mode meet IPU restrictions,
+ * -EINVAL otherwise.
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+                            enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_prepare() - prepare a conversion context.
+ *
+ * @ipu:       the IPU handle to use for the conversions
+ * @ic_task:   the IC task to use for the conversions
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ * @complete:  run completion callback
+ * @complete_context:  a context pointer for the completion callback
+ *
+ * Returns an opaque conversion context pointer on success, error pointer
+ * on failure. The input/output formats and rotation mode must already meet
+ * IPU restrictions.
+ *
+ * In V4L2, drivers should call ipu_image_convert_prepare() at streamon.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                         struct ipu_image *in, struct ipu_image *out,
+                         enum ipu_rotate_mode rot_mode,
+                         ipu_image_convert_cb_t complete,
+                         void *complete_context);
+
+/**
+ * ipu_image_convert_unprepare() - unprepare a conversion context.
+ *
+ * @ctx: the conversion context pointer to unprepare
+ *
+ * Aborts any active or pending conversions for this context and
+ * frees the context. Any currently active or pending runs belonging
+ * to this context are returned via the completion callback with an
+ * error run status.
+ *
+ * In V4L2, drivers should call ipu_image_convert_unprepare() at
+ * streamoff.
+ */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert_queue() - queue a conversion run
+ *
+ * @run: the run request pointer
+ *
+ * The struct ipu_image_convert_run must be dynamically allocated (_not_ as a
+ * local variable) by callers and filled in with a previously prepared
+ * conversion context handle and the DMA addresses of the input and output
+ * image buffers for this conversion run.
+ *
+ * When this conversion completes, the run pointer is returned via the
+ * completion callback. The caller is responsible for freeing the run
+ * object after it completes.
+ *
+ * In V4L2, drivers should call ipu_image_convert_queue() while
+ * streaming to queue the conversion of a received input buffer.
+ * For mem2mem devices, for example, this would be called in .device_run.
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run);
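
A kernel-side usage sketch; ctx is assumed to be a context from
ipu_image_convert_prepare() and in_dma/out_dma two DMA-mapped buffers
(kzalloc needs <linux/slab.h>):

	struct ipu_image_convert_run *run;

	run = kzalloc(sizeof(*run), GFP_KERNEL);	/* must not live on the stack */
	if (!run)
		return -ENOMEM;

	run->ctx = ctx;
	run->in_phys = in_dma;
	run->out_phys = out_dma;

	return ipu_image_convert_queue(run);	/* handed back via the callback */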
+
+/**
+ * ipu_image_convert_abort() - abort conversions
+ *
+ * @ctx: the conversion context pointer
+ *
+ * This will abort any active or pending conversions for this context.
+ * Any currently active or pending runs belonging to this context are
+ * returned via the completion callback with an error run status.
+ */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert() - asynchronous image conversion request
+ *
+ * @ipu:       the IPU handle to use for the conversion
+ * @ic_task:   the IC task to use for the conversion
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ * @complete:  run completion callback
+ * @complete_context:  a context pointer for the completion callback
+ *
+ * Request a single image conversion. Returns the run that has been queued.
+ * A conversion context is automatically created and is available in run->ctx.
+ * As with ipu_image_convert_prepare(), the input/output formats and rotation
+ * mode must already meet IPU restrictions.
+ *
+ * On successful return the caller can queue more run requests if needed, using
+ * the prepared context in run->ctx. The caller is responsible for unpreparing
+ * the context when no more conversion requests are needed.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                 struct ipu_image *in, struct ipu_image *out,
+                 enum ipu_rotate_mode rot_mode,
+                 ipu_image_convert_cb_t complete,
+                 void *complete_context);
+
+/**
+ * ipu_image_convert_sync() - synchronous single image conversion request
+ *
+ * @ipu:       the IPU handle to use for the conversion
+ * @ic_task:   the IC task to use for the conversion
+ * @in:                input image format
+ * @out:       output image format
+ * @rot_mode:  rotation mode
+ *
+ * Carry out a single image conversion. Returns when the conversion
+ * completes. The input/output formats and rotation mode must already
+ * meet IPU restrictions. The created context is automatically unprepared
+ * and the run freed on return.
+ */
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+                          struct ipu_image *in, struct ipu_image *out,
+                          enum ipu_rotate_mode rot_mode);
+
+
+#endif /* __IMX_IPU_IMAGE_CONVERT_H__ */
index c3de740..173073e 100644 (file)
@@ -63,23 +63,41 @@ enum ipu_csi_dest {
 /*
  * Enumeration of IPU rotation modes
  */
+#define IPU_ROT_BIT_VFLIP (1 << 0)
+#define IPU_ROT_BIT_HFLIP (1 << 1)
+#define IPU_ROT_BIT_90    (1 << 2)
+
 enum ipu_rotate_mode {
        IPU_ROTATE_NONE = 0,
-       IPU_ROTATE_VERT_FLIP,
-       IPU_ROTATE_HORIZ_FLIP,
-       IPU_ROTATE_180,
-       IPU_ROTATE_90_RIGHT,
-       IPU_ROTATE_90_RIGHT_VFLIP,
-       IPU_ROTATE_90_RIGHT_HFLIP,
-       IPU_ROTATE_90_LEFT,
+       IPU_ROTATE_VERT_FLIP = IPU_ROT_BIT_VFLIP,
+       IPU_ROTATE_HORIZ_FLIP = IPU_ROT_BIT_HFLIP,
+       IPU_ROTATE_180 = (IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
+       IPU_ROTATE_90_RIGHT = IPU_ROT_BIT_90,
+       IPU_ROTATE_90_RIGHT_VFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_VFLIP),
+       IPU_ROTATE_90_RIGHT_HFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_HFLIP),
+       IPU_ROTATE_90_LEFT = (IPU_ROT_BIT_90 |
+                             IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
 };
 
+/* 90-degree rotations require the IRT unit */
+#define ipu_rot_mode_is_irt(m) (((m) & IPU_ROT_BIT_90) != 0)
+
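With the modes now bit-encoded, a rotate mode can be composed from independent
controls; a sketch (hflip, vflip and rotate90 are assumed driver state):

	u32 mode = IPU_ROTATE_NONE;

	if (vflip)
		mode |= IPU_ROT_BIT_VFLIP;
	if (hflip)
		mode |= IPU_ROT_BIT_HFLIP;
	if (rotate90)
		mode |= IPU_ROT_BIT_90;

	/* any mode with the 90-degree bit set runs through the IRT unit */
	bool needs_irt = ipu_rot_mode_is_irt(mode);
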
 enum ipu_color_space {
        IPUV3_COLORSPACE_RGB,
        IPUV3_COLORSPACE_YUV,
        IPUV3_COLORSPACE_UNKNOWN,
 };
 
+/*
+ * Enumeration of VDI MOTION select
+ */
+enum ipu_motion_sel {
+       MOTION_NONE = 0,
+       LOW_MOTION,
+       MED_MOTION,
+       HIGH_MOTION,
+};
+
 struct ipuv3_channel;
 
 enum ipu_channel_irq {
@@ -97,6 +115,14 @@ enum ipu_channel_irq {
 #define IPUV3_CHANNEL_CSI2                      2
 #define IPUV3_CHANNEL_CSI3                      3
 #define IPUV3_CHANNEL_VDI_MEM_IC_VF             5
+/*
+ * NOTE: channels 6,7 are unused in the IPU and are not IDMAC channels,
+ * but the direct CSI->VDI linking is handled the same way as IDMAC
+ * channel linking in the FSU via the IPU_FS_PROC_FLOW registers, so
+ * these channel names are used to support the direct CSI->VDI link.
+ */
+#define IPUV3_CHANNEL_CSI_DIRECT                6
+#define IPUV3_CHANNEL_CSI_VDI_PREV              7
 #define IPUV3_CHANNEL_MEM_VDI_PREV              8
 #define IPUV3_CHANNEL_MEM_VDI_CUR               9
 #define IPUV3_CHANNEL_MEM_VDI_NEXT             10
@@ -133,6 +159,7 @@ enum ipu_channel_irq {
 #define IPUV3_CHANNEL_ROT_PP_MEM               50
 #define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA                51
 #define IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA       52
+#define IPUV3_NUM_CHANNELS                     64
 
 int ipu_map_irq(struct ipu_soc *ipu, int irq);
 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
@@ -176,6 +203,10 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
 bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num);
 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
 void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num);
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink);
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink);
 
 /*
  * IPU Channel Parameter Memory (cpmem) functions
@@ -334,6 +365,19 @@ struct ipu_ic *ipu_ic_get(struct ipu_soc *ipu, enum ipu_ic_task task);
 void ipu_ic_put(struct ipu_ic *ic);
 void ipu_ic_dump(struct ipu_ic *ic);
 
+/*
+ * IPU Video De-Interlacer (vdi) functions
+ */
+struct ipu_vdi;
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field);
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel);
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres);
+void ipu_vdi_unsetup(struct ipu_vdi *vdi);
+int ipu_vdi_enable(struct ipu_vdi *vdi);
+int ipu_vdi_disable(struct ipu_vdi *vdi);
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu);
+void ipu_vdi_put(struct ipu_vdi *vdi);
+
 /*
  * IPU Sensor Multiple FIFO Controller (SMFC) functions
  */