/*
* TTM.
*/
+
+#define AMDGPU_TTM_LRU_SIZE 20
+
+struct amdgpu_mman_lru {
+ struct list_head *lru[TTM_NUM_MEM_TYPES];
+ struct list_head *swap_lru;
+};
+
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
struct amdgpu_ring *buffer_funcs_ring;
/* Scheduler entity for buffer moves */
struct amd_sched_entity entity;
+
+ /* custom LRU management */
+ struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
};
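
The log2_size table gives every power-of-two size class its own position in the LRU, so small BOs are no longer pushed out by the churn of large ones. A minimal sketch of the bucket selection this implies; the helper name is hypothetical and the fls()/min() combination is an assumption, not the patch body:

/* Hypothetical helper: map a BO size in pages to its LRU bucket,
 * clamping so all very large BOs share the last bucket. */
static struct amdgpu_mman_lru *
amdgpu_mman_lru_for_size(struct amdgpu_mman *mman, unsigned num_pages)
{
	int i = min(fls(num_pages), AMDGPU_TTM_LRU_SIZE - 1);

	return &mman->log2_size[i];
}
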
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
+int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
+ struct fence *fence);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
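
Together these two helpers let the VM ID code test whether all fences a sync object tracks have signalled, and recycle the pending ones when an ID is reused. A hedged sketch of the call pattern, assuming dst receives the old fences while 'fence' is added to src; the wrapper is hypothetical:

/* Sketch, not the patch body: reuse an ID only once its active set
 * is idle; its old fences move into the job's sync object and the
 * job's own fence becomes the new user. */
static int amdgpu_vm_try_reuse_id(struct amdgpu_vm_id *id,
				  struct amdgpu_sync *sync,
				  struct fence *fence)
{
	if (!amdgpu_sync_is_idle(&id->active))
		return -EBUSY;

	return amdgpu_sync_cycle_fences(sync, &id->active, fence);
}
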
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
struct page **pages;
- dma_addr_t *pages_addr;
+#endif
bool ready;
const struct amdgpu_gart_funcs *gart_funcs;
};
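
With the CPU-side page array now compiled in only for debugfs, every access to gart.pages has to sit behind the same guard. A sketch of the resulting pattern in a bind path (variable names assumed):

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* the CPU-side shadow only exists when the debugfs interface
	 * is built in, so updates use the identical guard */
	for (i = 0; i < pages; i++)
		adev->gart.pages[p + i] = pagelist[i];
#endif
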
AMDGPU_RING_TYPE_VCE
};
-extern struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job);
uint64_t addr;
};
-struct amdgpu_vm_id {
- struct amdgpu_vm_manager_id *mgr_id;
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct fence *flushed_updates;
-};
-
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root va;
struct amdgpu_vm_pt *page_tables;
/* for id and flush management per ring */
- struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
+ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */
spinlock_t freed_lock;
struct amd_sched_entity entity;
};
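
Turning ids[] into an array of pointers means a VM no longer embeds per-ring ID state; each slot refers to one of the manager-owned IDs, or NULL while none is assigned. A hedged sketch of what a grab could look like (an assumption from the structures above, not the patch body):

/* Sketch: take the least recently used manager ID and point the
 * VM's per-ring slot at it. */
struct amdgpu_vm_id *id =
	list_first_entry(&adev->vm_manager.ids_lru,
			 struct amdgpu_vm_id, list);

list_move_tail(&id->list, &adev->vm_manager.ids_lru);
atomic_long_set(&id->owner, (long)vm);
vm->ids[ring->idx] = id;
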
-struct amdgpu_vm_manager_id {
+struct amdgpu_vm_id {
struct list_head list;
- struct fence *active;
+ struct fence *first;
+ struct amdgpu_sync active;
+ struct fence *last_flush;
+ struct amdgpu_ring *last_user;
atomic_long_t owner;
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct fence *flushed_updates;
+
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
- struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM];
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
uint32_t max_pfn;
/* vram base address for page table entry */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
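
Since amdgpu_vm_flush can now fail, callers must propagate the error instead of assuming the flush happened. A sketch of an adjusted call site; the job fields named here are assumptions:

/* Sketch: the flush result now has to be checked. */
r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
		    job->gds_base, job->gds_size,
		    job->gws_base, job->gws_size,
		    job->oa_base, job->oa_size);
if (r)
	return r;
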
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
*/
#include "clearstate_defs.h"
+struct amdgpu_rlc_funcs {
+ void (*enter_safe_mode)(struct amdgpu_device *adev);
+ void (*exit_safe_mode)(struct amdgpu_device *adev);
+};
+
struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t cp_table_gpu_addr;
volatile uint32_t *cp_table_ptr;
u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+ bool in_safe_mode;
+ const struct amdgpu_rlc_funcs *funcs;
};
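
The funcs table lets clockgating/powergating updates bracket themselves with RLC safe mode, with in_safe_mode available so implementations can early-out when already entered. Minimal usage sketch of the declared callbacks:

/* Sketch: bracket a CG/PG state update with RLC safe mode. */
adev->gfx.rlc.funcs->enter_safe_mode(adev);
/* ... program the clock/power gating registers ... */
adev->gfx.rlc.funcs->exit_safe_mode(adev);
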
struct amdgpu_mec {
/*
* UVD
*/
-#define AMDGPU_MAX_UVD_HANDLES 10
-#define AMDGPU_UVD_STACK_SIZE (1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_DEFAULT_UVD_HANDLES 10
+#define AMDGPU_MAX_UVD_HANDLES 40
+#define AMDGPU_UVD_STACK_SIZE (200*1024)
+#define AMDGPU_UVD_HEAP_SIZE (256*1024)
+#define AMDGPU_UVD_SESSION_SIZE (50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET 256
struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
+ unsigned max_handles;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
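
With per-session state split out, the VCPU BO scales with max_handles instead of a fixed worst case. A sketch of the sizing this implies, assuming an allocation path like amdgpu_uvd_sw_init; hdr is the firmware header and the "+ 8" pad is an assumption:

/* Sketch: firmware image, stack and heap are shared once; session
 * state is per handle, so the BO grows with max_handles. */
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
	  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
	  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
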
* Debugfs
*/
struct amdgpu_debugfs {
- struct drm_info_list *files;
+ const struct drm_info_list *files;
unsigned num_files;
};
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- struct drm_info_list *files,
+ const struct drm_info_list *files,
unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
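
Constifying the table means the info lists can live in rodata. Sketch of a caller after the change; the table contents and show callback are hypothetical placeholders:

/* Sketch: the list can now be declared const. */
static const struct drm_info_list amdgpu_example_list[] = {
	{ "amdgpu_example", amdgpu_debugfs_example_show, 0, NULL },
};

r = amdgpu_debugfs_add_files(adev, amdgpu_example_list,
			     ARRAY_SIZE(amdgpu_example_list));
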
/*
* CGS
*/
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
-
-
-/*
- * CGS
- */
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
+struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
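
Besides dropping the duplicated prototypes, typing the handle as struct cgs_device * lets the compiler catch a mixed-up pointer that a void * would silently accept. Usage sketch of the new signatures:

/* Sketch: the handle is a distinct type now, so handing it to a
 * function expecting some other pointer no longer compiles. */
struct cgs_device *cgs = amdgpu_cgs_create_device(adev);
if (!cgs)
	return -ENOMEM;
/* ... pass 'cgs' to the CGS/powerplay consumers ... */
amdgpu_cgs_destroy_device(cgs);
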
/* GPU virtualization */
* KMS
*/
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
-extern int amdgpu_max_kms_ioctl;
+extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
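
The counter is derived from the (already const) ioctl table, so it can be const as well. A sketch of the matching definition, assuming amdgpu_kms.c keeps the ARRAY_SIZE one-liner:

/* Sketch: count stays tied to the table it describes. */
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
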