drivers/gpu/drm/i915/intel_lrc.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Ben Widawsky <ben@bwidawsk.net>
25  *    Michel Thierry <michel.thierry@intel.com>
26  *    Thomas Daniel <thomas.daniel@intel.com>
27  *    Oscar Mateo <oscar.mateo@intel.com>
28  *
29  */
30
31 /**
32  * DOC: Logical Rings, Logical Ring Contexts and Execlists
33  *
34  * Motivation:
35  * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36  * These expanded contexts enable a number of new abilities, especially
37  * "Execlists" (also implemented in this file).
38  *
39  * One of the main differences with the legacy HW contexts is that logical
40  * ring contexts incorporate many more things into the context's state,
41  * such as PDPs or ringbuffer control registers:
42  *
43  * The reason why PDPs are included in the context is straightforward: as
44  * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45  * contained there means you don't need to do a ppgtt->switch_mm yourself;
46  * instead, the GPU will do it for you on the context switch.
47  *
48  * But, what about the ringbuffer control registers (head, tail, etc.)?
49  * Shouldn't we just need a set of those per engine command streamer? This is
50  * where the name "Logical Rings" starts to make sense: by virtualizing the
51  * rings, the engine cs shifts to a new "ring buffer" with every context
52  * switch. When you want to submit a workload to the GPU you: A) choose your
53  * context, B) find its appropriate virtualized ring, C) write commands to it
54  * and then, finally, D) tell the GPU to switch to that context.
55  *
56  * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57  * to a context is via a context execution list, ergo "Execlists".
58  *
59  * LRC implementation:
60  * Regarding the creation of contexts, we have:
61  *
62  * - One global default context.
63  * - One local default context for each opened fd.
64  * - One local extra context for each context create ioctl call.
65  *
66  * Now that ringbuffers belong per-context (and not per-engine, like before)
67  * and that contexts are uniquely tied to a given engine (and not reusable,
68  * like before) we need:
69  *
70  * - One ringbuffer per-engine inside each context.
71  * - One backing object per-engine inside each context.
72  *
73  * The global default context starts its life with these new objects fully
74  * allocated and populated. The local default context for each opened fd is
75  * more complex, because we don't know at creation time which engine is going
76  * to use them. To handle this, we have implemented a deferred creation of LR
77  * contexts:
78  *
79  * The local context starts its life as a hollow or blank holder that only
80  * gets populated for a given engine once we receive an execbuffer. If later
81  * on we receive another execbuffer ioctl for the same context but a different
82  * engine, we allocate/populate a new ringbuffer and context backing object and
83  * so on.
84  *
85  * Finally, regarding local contexts created using the ioctl call: as they are
86  * only allowed with the render ring, we can allocate & populate them right
87  * away (no need to defer anything, at least for now).
88  *
89  * Execlists implementation:
90  * Execlists are the new method by which, on gen8+ hardware, workloads are
91  * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92  * This method works as follows:
93  *
94  * When a request is committed, its commands (the BB start and any leading or
95  * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96  * for the appropriate context. The tail pointer in the hardware context is not
97  * updated at this time, but is instead kept by the driver in the ringbuffer
98  * structure. A structure representing this request is added to a request queue
99  * for the appropriate engine: this structure contains a copy of the context's
100  * tail after the request was written to the ring buffer and a pointer to the
101  * context itself.
102  *
103  * If the engine's request queue was empty before the request was added, the
104  * queue is processed immediately. Otherwise the queue will be processed during
105  * a context switch interrupt. In any case, elements on the queue will get sent
106  * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107  * globally unique 20-bit submission ID.
108  *
109  * When execution of a request completes, the GPU updates the context status
110  * buffer with a context complete event and generates a context switch interrupt.
111  * During the interrupt handling, the driver examines the events in the buffer:
112  * for each context complete event, if the announced ID matches that on the head
113  * of the request queue, then that request is retired and removed from the queue.
114  *
115  * After processing, if any requests were retired and the queue is not empty
116  * then a new execution list can be submitted. The two requests at the front of
117  * the queue are next to be submitted but since a context may not occur twice in
118  * an execution list, if subsequent requests have the same ID as the first then
119  * the two requests must be combined. This is done simply by discarding requests
120  * at the head of the queue until either only one request is left (in which case
121  * we use a NULL second context) or the first two requests have unique IDs.
122  *
123  * By always executing the first two requests in the queue the driver ensures
124  * that the GPU is kept as busy as possible. In the case where a single context
125  * completes but a second context is still executing, the request for this second
126  * context will be at the head of the queue when we remove the first one. This
127  * request will then be resubmitted along with a new request for a different context,
128  * which will cause the hardware to continue executing the second request and queue
129  * the new request (the GPU detects the condition of a context getting preempted
130  * with the same context and optimizes the context switch flow by not doing
131  * preemption, but just sampling the new tail pointer).
132  *
133  */
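
/*
 * Illustrative sketch only (not driver code, an assumption written out for
 * readability): the pairing rule described above, using a hypothetical
 * 'req0'/'req1' pair. Consecutive requests for the same context are merged
 * (the later tail supersedes the earlier one) and the first request for a
 * different context, if any, becomes the second ELSP element. The real
 * logic lives in execlists_context_unqueue() below.
 *
 *	req0 = first request on the queue; req1 = NULL;
 *	for each subsequent request 'cursor':
 *		if (cursor->ctx == req0->ctx)
 *			req0 = cursor;		<- merge: drop the older request
 *		else {
 *			req1 = cursor;		<- second, distinct context
 *			break;
 *		}
 *	submit req0 (and req1, which may be NULL) to the ELSP.
 */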
134 #include <linux/interrupt.h>
135
136 #include <drm/drmP.h>
137 #include <drm/i915_drm.h>
138 #include "i915_drv.h"
139 #include "intel_mocs.h"
140
141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
144
145 #define RING_EXECLIST_QFULL             (1 << 0x2)
146 #define RING_EXECLIST1_VALID            (1 << 0x3)
147 #define RING_EXECLIST0_VALID            (1 << 0x4)
148 #define RING_EXECLIST_ACTIVE_STATUS     (3 << 0xE)
149 #define RING_EXECLIST1_ACTIVE           (1 << 0x11)
150 #define RING_EXECLIST0_ACTIVE           (1 << 0x12)
151
152 #define GEN8_CTX_STATUS_IDLE_ACTIVE     (1 << 0)
153 #define GEN8_CTX_STATUS_PREEMPTED       (1 << 1)
154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH  (1 << 2)
155 #define GEN8_CTX_STATUS_ACTIVE_IDLE     (1 << 3)
156 #define GEN8_CTX_STATUS_COMPLETE        (1 << 4)
157 #define GEN8_CTX_STATUS_LITE_RESTORE    (1 << 15)
158
159 #define CTX_LRI_HEADER_0                0x01
160 #define CTX_CONTEXT_CONTROL             0x02
161 #define CTX_RING_HEAD                   0x04
162 #define CTX_RING_TAIL                   0x06
163 #define CTX_RING_BUFFER_START           0x08
164 #define CTX_RING_BUFFER_CONTROL         0x0a
165 #define CTX_BB_HEAD_U                   0x0c
166 #define CTX_BB_HEAD_L                   0x0e
167 #define CTX_BB_STATE                    0x10
168 #define CTX_SECOND_BB_HEAD_U            0x12
169 #define CTX_SECOND_BB_HEAD_L            0x14
170 #define CTX_SECOND_BB_STATE             0x16
171 #define CTX_BB_PER_CTX_PTR              0x18
172 #define CTX_RCS_INDIRECT_CTX            0x1a
173 #define CTX_RCS_INDIRECT_CTX_OFFSET     0x1c
174 #define CTX_LRI_HEADER_1                0x21
175 #define CTX_CTX_TIMESTAMP               0x22
176 #define CTX_PDP3_UDW                    0x24
177 #define CTX_PDP3_LDW                    0x26
178 #define CTX_PDP2_UDW                    0x28
179 #define CTX_PDP2_LDW                    0x2a
180 #define CTX_PDP1_UDW                    0x2c
181 #define CTX_PDP1_LDW                    0x2e
182 #define CTX_PDP0_UDW                    0x30
183 #define CTX_PDP0_LDW                    0x32
184 #define CTX_LRI_HEADER_2                0x41
185 #define CTX_R_PWR_CLK_STATE             0x42
186 #define CTX_GPGPU_CSR_BASE_ADDRESS      0x44
187
188 #define GEN8_CTX_VALID (1<<0)
189 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
190 #define GEN8_CTX_FORCE_RESTORE (1<<2)
191 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
192 #define GEN8_CTX_PRIVILEGE (1<<8)
193
194 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
195         (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
196         (reg_state)[(pos)+1] = (val); \
197 } while (0)
198
199 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do {                \
200         const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
201         reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
202         reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
203 } while (0)
204
205 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
206         reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
207         reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
208 } while (0)
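
/*
 * Illustrative expansion (an assumption, for readability only; not
 * generated code): ASSIGN_CTX_PDP(ppgtt, reg_state, 3) is roughly
 * equivalent to
 *
 *	const u64 addr = i915_page_dir_dma_addr(ppgtt, 3);
 *	reg_state[CTX_PDP3_UDW + 1] = upper_32_bits(addr);
 *	reg_state[CTX_PDP3_LDW + 1] = lower_32_bits(addr);
 *
 * i.e. only the value slot of each (reg, value) LRI pair is updated; the
 * register offsets themselves are written when the default context image
 * is populated.
 */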
209
210 enum {
211         FAULT_AND_HANG = 0,
212         FAULT_AND_HALT, /* Debug only */
213         FAULT_AND_STREAM,
214         FAULT_AND_CONTINUE /* Unsupported */
215 };
216 #define GEN8_CTX_ID_SHIFT 32
217 #define GEN8_CTX_ID_WIDTH 21
218 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x17
219 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x26
220
221 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
222 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
223
224 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
225                                             struct intel_engine_cs *engine);
226 static int intel_lr_context_pin(struct i915_gem_context *ctx,
227                                 struct intel_engine_cs *engine);
228
229 /**
230  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
231  * @dev_priv: i915 device private
232  * @enable_execlists: value of i915.enable_execlists module parameter.
233  *
234  * Only certain platforms support Execlists (the prerequisites being
235  * support for Logical Ring Contexts and Aliasing PPGTT or better).
236  *
237  * Return: 1 if Execlists is supported and has to be enabled.
238  */
239 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
240 {
241         /* On platforms where execlists are available, vGPU will only
242          * support execlist mode, not ring buffer mode.
243          */
244         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
245                 return 1;
246
247         if (INTEL_GEN(dev_priv) >= 9)
248                 return 1;
249
250         if (enable_execlists == 0)
251                 return 0;
252
253         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
254             USES_PPGTT(dev_priv) &&
255             i915.use_mmio_flip >= 0)
256                 return 1;
257
258         return 0;
259 }
260
261 static void
262 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
263 {
264         struct drm_i915_private *dev_priv = engine->i915;
265
266         if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
267                 engine->idle_lite_restore_wa = ~0;
268
269         engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
270                                         IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
271                                         (engine->id == VCS || engine->id == VCS2);
272
273         engine->ctx_desc_template = GEN8_CTX_VALID;
274         if (IS_GEN8(dev_priv))
275                 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
276         engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
277
278         /* TODO: WaDisableLiteRestore when we start using semaphore
279          * signalling between Command Streamers */
280         /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
281
282         /* WaEnableForceRestoreInCtxtDescForVCS:skl */
283         /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
284         if (engine->disable_lite_restore_wa)
285                 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
286 }
287
288 /**
289  * intel_lr_context_descriptor_update() - calculate & cache the descriptor
290  *                                        for a pinned context
291  *
292  * @ctx: Context to work on
293  * @engine: Engine the descriptor will be used with
294  *
295  * The context descriptor encodes various attributes of a context,
296  * including its GTT address and some flags. Because it's fairly
297  * expensive to calculate, we'll just do it once and cache the result,
298  * which remains valid until the context is unpinned.
299  *
300  * This is what a descriptor looks like, from LSB to MSB:
301  *    bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
302  *    bits 12-31:    LRCA, GTT address of (the HWSP of) this context
303  *    bits 32-52:    ctx ID, a globally unique tag
304  *    bits 53-54:    mbz, reserved for use by hardware
305  *    bits 55-63:    group ID, currently unused and set to 0
306  */
307 static void
308 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
309                                    struct intel_engine_cs *engine)
310 {
311         struct intel_context *ce = &ctx->engine[engine->id];
312         u64 desc;
313
314         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
315
316         desc = ctx->desc_template;                              /* bits  3-4  */
317         desc |= engine->ctx_desc_template;                      /* bits  0-11 */
318         desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
319                                                                 /* bits 12-31 */
320         desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */
321
322         ce->lrc_desc = desc;
323 }
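
/*
 * For illustration only (an assumption about how the cached value could be
 * decoded again; the driver itself only ever consumes ce->lrc_desc whole):
 *
 *	hw_id = (desc >> GEN8_CTX_ID_SHIFT) & GENMASK_ULL(GEN8_CTX_ID_WIDTH - 1, 0);
 *	lrca  = desc & GENMASK_ULL(31, 12);
 *	flags = desc & GENMASK_ULL(11, 0);
 */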
324
325 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
326                                      struct intel_engine_cs *engine)
327 {
328         return ctx->engine[engine->id].lrc_desc;
329 }
330
331 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
332                                  struct drm_i915_gem_request *rq1)
333 {
334
335         struct intel_engine_cs *engine = rq0->engine;
336         struct drm_i915_private *dev_priv = rq0->i915;
337         uint64_t desc[2];
338
339         if (rq1) {
340                 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
341                 rq1->elsp_submitted++;
342         } else {
343                 desc[1] = 0;
344         }
345
346         desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
347         rq0->elsp_submitted++;
348
349         /* You must always write both descriptors in the order below. */
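	/*
	 * The ELSP expects the two elements as four consecutive dword writes:
	 * element 1 (upper then lower dword) followed by element 0; submission
	 * is triggered by the final write of element 0's lower dword.
	 */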
350         I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
351         I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
352
353         I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
354         /* The context is automatically loaded after the following */
355         I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
356
357         /* ELSP is a write-only register, so use another nearby reg for posting */
358         POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
359 }
360
361 static void
362 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
363 {
364         ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
365         ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
366         ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
367         ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
368 }
369
370 static void execlists_update_context(struct drm_i915_gem_request *rq)
371 {
372         struct intel_engine_cs *engine = rq->engine;
373         struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
374         uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
375
376         reg_state[CTX_RING_TAIL+1] = rq->tail;
377
378         /* True 32b PPGTT with dynamic page allocation: update PDP
379          * registers and point the unallocated PDPs to scratch page.
380          * PML4 is allocated during ppgtt init, so this is not needed
381          * in 48-bit mode.
382          */
383         if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
384                 execlists_update_context_pdps(ppgtt, reg_state);
385 }
386
387 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
388                                       struct drm_i915_gem_request *rq1)
389 {
390         struct drm_i915_private *dev_priv = rq0->i915;
391         unsigned int fw_domains = rq0->engine->fw_domains;
392
393         execlists_update_context(rq0);
394
395         if (rq1)
396                 execlists_update_context(rq1);
397
398         spin_lock_irq(&dev_priv->uncore.lock);
399         intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
400
401         execlists_elsp_write(rq0, rq1);
402
403         intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
404         spin_unlock_irq(&dev_priv->uncore.lock);
405 }
406
407 static inline void execlists_context_status_change(
408                 struct drm_i915_gem_request *rq,
409                 unsigned long status)
410 {
411         /*
412          * For now this is only used when GVT-g is enabled. When GVT-g is
413          * disabled, the compiler should eliminate this function as dead code.
414          */
415         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
416                 return;
417
418         atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
419 }
420
421 static void execlists_context_unqueue(struct intel_engine_cs *engine)
422 {
423         struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
424         struct drm_i915_gem_request *cursor, *tmp;
425
426         assert_spin_locked(&engine->execlist_lock);
427
428         /*
429          * If irqs are not active, generate a warning, as batches that finish
430          * without the irqs may get lost and a GPU hang may occur.
431          */
432         WARN_ON(!intel_irqs_enabled(engine->i915));
433
434         /* Try to read in pairs */
435         list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
436                                  execlist_link) {
437                 if (!req0) {
438                         req0 = cursor;
439                 } else if (req0->ctx == cursor->ctx) {
440                         /* Same ctx: ignore first request, as second request
441                          * will update tail past first request's workload */
442                         cursor->elsp_submitted = req0->elsp_submitted;
443                         list_del(&req0->execlist_link);
444                         i915_gem_request_unreference(req0);
445                         req0 = cursor;
446                 } else {
447                         if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
448                                 /*
449                                  * req0 (after merging) ctx requires single
450                                  * submission, stop picking
451                                  */
452                                 if (req0->ctx->execlists_force_single_submission)
453                                         break;
454                                 /*
455                                  * req0 ctx doesn't require single submission,
456                                  * but the next req's ctx requires it, stop picking
457                                  */
458                                 if (cursor->ctx->execlists_force_single_submission)
459                                         break;
460                         }
461                         req1 = cursor;
462                         WARN_ON(req1->elsp_submitted);
463                         break;
464                 }
465         }
466
467         if (unlikely(!req0))
468                 return;
469
470         execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
471
472         if (req1)
473                 execlists_context_status_change(req1,
474                                                 INTEL_CONTEXT_SCHEDULE_IN);
475
476         if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
477                 /*
478                  * WaIdleLiteRestore: make sure we never cause a lite restore
479                  * with HEAD==TAIL.
480                  *
481                  * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
482                  * resubmit the request. See gen8_emit_request() for where we
483                  * prepare the padding after the end of the request.
484                  */
485                 struct intel_ringbuffer *ringbuf;
486
487                 ringbuf = req0->ctx->engine[engine->id].ringbuf;
488                 req0->tail += 8;
489                 req0->tail &= ringbuf->size - 1;
490         }
491
492         execlists_submit_requests(req0, req1);
493 }
494
495 static unsigned int
496 execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
497 {
498         struct drm_i915_gem_request *head_req;
499
500         assert_spin_locked(&engine->execlist_lock);
501
502         head_req = list_first_entry_or_null(&engine->execlist_queue,
503                                             struct drm_i915_gem_request,
504                                             execlist_link);
505
506         if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
507                 return 0;
508
509         WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
510
511         if (--head_req->elsp_submitted > 0)
512                 return 0;
513
514         execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
515
516         list_del(&head_req->execlist_link);
517         i915_gem_request_unreference(head_req);
518
519         return 1;
520 }
521
522 static u32
523 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
524                    u32 *context_id)
525 {
526         struct drm_i915_private *dev_priv = engine->i915;
527         u32 status;
528
529         read_pointer %= GEN8_CSB_ENTRIES;
530
531         status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
532
533         if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
534                 return 0;
535
536         *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
537                                                               read_pointer));
538
539         return status;
540 }
541
542 /**
543  * intel_lrc_irq_handler() - handle Context Switch interrupts
544  * @data: the engine, passed to the tasklet handler as an unsigned long
545  *
546  * Check the unread Context Status Buffers and manage the submission of new
547  * contexts to the ELSP accordingly.
548  */
549 static void intel_lrc_irq_handler(unsigned long data)
550 {
551         struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
552         struct drm_i915_private *dev_priv = engine->i915;
553         u32 status_pointer;
554         unsigned int read_pointer, write_pointer;
555         u32 csb[GEN8_CSB_ENTRIES][2];
556         unsigned int csb_read = 0, i;
557         unsigned int submit_contexts = 0;
558
559         intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
560
561         status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
562
563         read_pointer = engine->next_context_status_buffer;
564         write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
565         if (read_pointer > write_pointer)
566                 write_pointer += GEN8_CSB_ENTRIES;
567
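	/*
	 * Drain every CSB entry written since we last ran. read_pointer may
	 * temporarily exceed GEN8_CSB_ENTRIES because of the unwrap above;
	 * get_context_status() reduces it modulo the ring size.
	 */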
568         while (read_pointer < write_pointer) {
569                 if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
570                         break;
571                 csb[csb_read][0] = get_context_status(engine, ++read_pointer,
572                                                       &csb[csb_read][1]);
573                 csb_read++;
574         }
575
576         engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
577
578         /* Update the read pointer to the old write pointer. Manual ringbuffer
579          * management ftw </sarcasm> */
580         I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
581                       _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
582                                     engine->next_context_status_buffer << 8));
583
584         intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
585
586         spin_lock(&engine->execlist_lock);
587
588         for (i = 0; i < csb_read; i++) {
589                 if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
590                         if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
591                                 if (execlists_check_remove_request(engine, csb[i][1]))
592                                         WARN(1, "Lite Restored request removed from queue\n");
593                         } else
594                                 WARN(1, "Preemption without Lite Restore\n");
595                 }
596
597                 if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
598                     GEN8_CTX_STATUS_ELEMENT_SWITCH))
599                         submit_contexts +=
600                                 execlists_check_remove_request(engine, csb[i][1]);
601         }
602
603         if (submit_contexts) {
604                 if (!engine->disable_lite_restore_wa ||
605                     (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
606                         execlists_context_unqueue(engine);
607         }
608
609         spin_unlock(&engine->execlist_lock);
610
611         if (unlikely(submit_contexts > 2))
612                 DRM_ERROR("More than two context complete events?\n");
613 }
614
615 static void execlists_context_queue(struct drm_i915_gem_request *request)
616 {
617         struct intel_engine_cs *engine = request->engine;
618         struct drm_i915_gem_request *cursor;
619         int num_elements = 0;
620
621         spin_lock_bh(&engine->execlist_lock);
622
623         list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
624                 if (++num_elements > 2)
625                         break;
626
627         if (num_elements > 2) {
628                 struct drm_i915_gem_request *tail_req;
629
630                 tail_req = list_last_entry(&engine->execlist_queue,
631                                            struct drm_i915_gem_request,
632                                            execlist_link);
633
634                 if (request->ctx == tail_req->ctx) {
635                         WARN(tail_req->elsp_submitted != 0,
636                                 "More than 2 already-submitted reqs queued\n");
637                         list_del(&tail_req->execlist_link);
638                         i915_gem_request_unreference(tail_req);
639                 }
640         }
641
642         i915_gem_request_reference(request);
643         list_add_tail(&request->execlist_link, &engine->execlist_queue);
644         request->ctx_hw_id = request->ctx->hw_id;
645         if (num_elements == 0)
646                 execlists_context_unqueue(engine);
647
648         spin_unlock_bh(&engine->execlist_lock);
649 }
650
651 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
652 {
653         struct intel_engine_cs *engine = req->engine;
654         uint32_t flush_domains;
655         int ret;
656
657         flush_domains = 0;
658         if (engine->gpu_caches_dirty)
659                 flush_domains = I915_GEM_GPU_DOMAINS;
660
661         ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
662         if (ret)
663                 return ret;
664
665         engine->gpu_caches_dirty = false;
666         return 0;
667 }
668
669 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
670                                  struct list_head *vmas)
671 {
672         const unsigned other_rings = ~intel_engine_flag(req->engine);
673         struct i915_vma *vma;
674         uint32_t flush_domains = 0;
675         bool flush_chipset = false;
676         int ret;
677
678         list_for_each_entry(vma, vmas, exec_list) {
679                 struct drm_i915_gem_object *obj = vma->obj;
680
681                 if (obj->active & other_rings) {
682                         ret = i915_gem_object_sync(obj, req->engine, &req);
683                         if (ret)
684                                 return ret;
685                 }
686
687                 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
688                         flush_chipset |= i915_gem_clflush_object(obj, false);
689
690                 flush_domains |= obj->base.write_domain;
691         }
692
693         if (flush_domains & I915_GEM_DOMAIN_GTT)
694                 wmb();
695
696         /* Unconditionally invalidate gpu caches and ensure that we do flush
697          * any residual writes from the previous batch.
698          */
699         return logical_ring_invalidate_all_caches(req);
700 }
701
702 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
703 {
704         struct intel_engine_cs *engine = request->engine;
705         struct intel_context *ce = &request->ctx->engine[engine->id];
706         int ret;
707
708         /* Flush enough space to reduce the likelihood of waiting after
709          * we start building the request - in which case we will just
710          * have to repeat work.
711          */
712         request->reserved_space += EXECLISTS_REQUEST_SIZE;
713
714         if (!ce->state) {
715                 ret = execlists_context_deferred_alloc(request->ctx, engine);
716                 if (ret)
717                         return ret;
718         }
719
720         request->ringbuf = ce->ringbuf;
721
722         if (i915.enable_guc_submission) {
723                 /*
724                  * Check that the GuC has space for the request before
725                  * going any further, as the i915_add_request() call
726                  * later on mustn't fail ...
727                  */
728                 ret = i915_guc_wq_check_space(request);
729                 if (ret)
730                         return ret;
731         }
732
733         ret = intel_lr_context_pin(request->ctx, engine);
734         if (ret)
735                 return ret;
736
737         ret = intel_ring_begin(request, 0);
738         if (ret)
739                 goto err_unpin;
740
741         if (!ce->initialised) {
742                 ret = engine->init_context(request);
743                 if (ret)
744                         goto err_unpin;
745
746                 ce->initialised = true;
747         }
748
749         /* Note that after this point, we have committed to using
750          * this request as it is being used to both track the
751          * state of engine initialisation and liveness of the
752          * golden renderstate above. Think twice before you try
753          * to cancel/unwind this request now.
754          */
755
756         request->reserved_space -= EXECLISTS_REQUEST_SIZE;
757         return 0;
758
759 err_unpin:
760         intel_lr_context_unpin(request->ctx, engine);
761         return ret;
762 }
763
764 /*
765  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
766  * @request: Request to advance the logical ringbuffer of.
767  *
768  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
769  * really happens during submission is that the context and current tail will be placed
770  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
771  * point, the tail *inside* the context is updated and the ELSP written to.
772  */
773 static int
774 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
775 {
776         struct intel_ringbuffer *ringbuf = request->ringbuf;
777         struct intel_engine_cs *engine = request->engine;
778
779         intel_logical_ring_advance(ringbuf);
780         request->tail = ringbuf->tail;
781
782         /*
783          * Here we add two extra NOOPs as padding to avoid
784          * lite restore of a context with HEAD==TAIL.
785          *
786          * Caller must reserve WA_TAIL_DWORDS for us!
787          */
788         intel_logical_ring_emit(ringbuf, MI_NOOP);
789         intel_logical_ring_emit(ringbuf, MI_NOOP);
790         intel_logical_ring_advance(ringbuf);
791
792         if (intel_engine_stopped(engine))
793                 return 0;
794
795         /* We keep the previous context alive until we retire the following
796          * request. This ensures that the context object is still pinned
797          * for any residual writes the HW makes into it on the context switch
798          * into the next object following the breadcrumb. Otherwise, we may
799          * retire the context too early.
800          */
801         request->previous_context = engine->last_context;
802         engine->last_context = request->ctx;
803
804         if (i915.enable_guc_submission)
805                 i915_guc_submit(request);
806         else
807                 execlists_context_queue(request);
808
809         return 0;
810 }
811
812 /**
813  * execlists_submission() - submit a batchbuffer for execution, Execlists style
814  * @params: execbuffer call parameters.
815  * @args: execbuffer call arguments.
816  * @vmas: list of vmas.
817  *
818  * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
819  * away the submission details of the execbuffer ioctl call.
820  *
821  * Return: non-zero if the submission fails.
822  */
823 int intel_execlists_submission(struct i915_execbuffer_params *params,
824                                struct drm_i915_gem_execbuffer2 *args,
825                                struct list_head *vmas)
826 {
827         struct drm_device       *dev = params->dev;
828         struct intel_engine_cs *engine = params->engine;
829         struct drm_i915_private *dev_priv = dev->dev_private;
830         struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
831         u64 exec_start;
832         int instp_mode;
833         u32 instp_mask;
834         int ret;
835
836         instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
837         instp_mask = I915_EXEC_CONSTANTS_MASK;
838         switch (instp_mode) {
839         case I915_EXEC_CONSTANTS_REL_GENERAL:
840         case I915_EXEC_CONSTANTS_ABSOLUTE:
841         case I915_EXEC_CONSTANTS_REL_SURFACE:
842                 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
843                         DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
844                         return -EINVAL;
845                 }
846
847                 if (instp_mode != dev_priv->relative_constants_mode) {
848                         if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
849                                 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
850                                 return -EINVAL;
851                         }
852
853                         /* The HW changed the meaning of this bit on gen6 */
854                         instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
855                 }
856                 break;
857         default:
858                 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
859                 return -EINVAL;
860         }
861
862         if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
863                 DRM_DEBUG("sol reset is gen7 only\n");
864                 return -EINVAL;
865         }
866
867         ret = execlists_move_to_gpu(params->request, vmas);
868         if (ret)
869                 return ret;
870
871         if (engine == &dev_priv->engine[RCS] &&
872             instp_mode != dev_priv->relative_constants_mode) {
873                 ret = intel_ring_begin(params->request, 4);
874                 if (ret)
875                         return ret;
876
877                 intel_logical_ring_emit(ringbuf, MI_NOOP);
878                 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
879                 intel_logical_ring_emit_reg(ringbuf, INSTPM);
880                 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
881                 intel_logical_ring_advance(ringbuf);
882
883                 dev_priv->relative_constants_mode = instp_mode;
884         }
885
886         exec_start = params->batch_obj_vm_offset +
887                      args->batch_start_offset;
888
889         ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
890         if (ret)
891                 return ret;
892
893         trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
894
895         i915_gem_execbuffer_move_to_active(vmas, params->request);
896
897         return 0;
898 }
899
900 void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
901 {
902         struct drm_i915_gem_request *req, *tmp;
903         LIST_HEAD(cancel_list);
904
905         WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
906
907         spin_lock_bh(&engine->execlist_lock);
908         list_replace_init(&engine->execlist_queue, &cancel_list);
909         spin_unlock_bh(&engine->execlist_lock);
910
911         list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
912                 list_del(&req->execlist_link);
913                 i915_gem_request_unreference(req);
914         }
915 }
916
917 void intel_logical_ring_stop(struct intel_engine_cs *engine)
918 {
919         struct drm_i915_private *dev_priv = engine->i915;
920         int ret;
921
922         if (!intel_engine_initialized(engine))
923                 return;
924
925         ret = intel_engine_idle(engine);
926         if (ret)
927                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
928                           engine->name, ret);
929
930         /* TODO: Is this correct with Execlists enabled? */
931         I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
932         if (intel_wait_for_register(dev_priv,
933                                     RING_MI_MODE(engine->mmio_base),
934                                     MODE_IDLE, MODE_IDLE,
935                                     1000)) {
936                 DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
937                 return;
938         }
939         I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
940 }
941
942 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
943 {
944         struct intel_engine_cs *engine = req->engine;
945         int ret;
946
947         if (!engine->gpu_caches_dirty)
948                 return 0;
949
950         ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
951         if (ret)
952                 return ret;
953
954         engine->gpu_caches_dirty = false;
955         return 0;
956 }
957
958 static int intel_lr_context_pin(struct i915_gem_context *ctx,
959                                 struct intel_engine_cs *engine)
960 {
961         struct drm_i915_private *dev_priv = ctx->i915;
962         struct intel_context *ce = &ctx->engine[engine->id];
963         void *vaddr;
964         u32 *lrc_reg_state;
965         int ret;
966
967         lockdep_assert_held(&ctx->i915->dev->struct_mutex);
968
969         if (ce->pin_count++)
970                 return 0;
971
972         ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
973                                     PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
974         if (ret)
975                 goto err;
976
977         vaddr = i915_gem_object_pin_map(ce->state);
978         if (IS_ERR(vaddr)) {
979                 ret = PTR_ERR(vaddr);
980                 goto unpin_ctx_obj;
981         }
982
983         lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
984
985         ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
986         if (ret)
987                 goto unpin_map;
988
989         i915_gem_context_reference(ctx);
990         ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
991         intel_lr_context_descriptor_update(ctx, engine);
992
993         lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
994         ce->lrc_reg_state = lrc_reg_state;
995         ce->state->dirty = true;
996
997         /* Invalidate GuC TLB. */
998         if (i915.enable_guc_submission)
999                 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
1000
1001         return 0;
1002
1003 unpin_map:
1004         i915_gem_object_unpin_map(ce->state);
1005 unpin_ctx_obj:
1006         i915_gem_object_ggtt_unpin(ce->state);
1007 err:
1008         ce->pin_count = 0;
1009         return ret;
1010 }
1011
1012 void intel_lr_context_unpin(struct i915_gem_context *ctx,
1013                             struct intel_engine_cs *engine)
1014 {
1015         struct intel_context *ce = &ctx->engine[engine->id];
1016
1017         lockdep_assert_held(&ctx->i915->dev->struct_mutex);
1018         GEM_BUG_ON(ce->pin_count == 0);
1019
1020         if (--ce->pin_count)
1021                 return;
1022
1023         intel_unpin_ringbuffer_obj(ce->ringbuf);
1024
1025         i915_gem_object_unpin_map(ce->state);
1026         i915_gem_object_ggtt_unpin(ce->state);
1027
1028         ce->lrc_vma = NULL;
1029         ce->lrc_desc = 0;
1030         ce->lrc_reg_state = NULL;
1031
1032         i915_gem_context_unreference(ctx);
1033 }
1034
1035 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1036 {
1037         int ret, i;
1038         struct intel_engine_cs *engine = req->engine;
1039         struct intel_ringbuffer *ringbuf = req->ringbuf;
1040         struct i915_workarounds *w = &req->i915->workarounds;
1041
1042         if (w->count == 0)
1043                 return 0;
1044
1045         engine->gpu_caches_dirty = true;
1046         ret = logical_ring_flush_all_caches(req);
1047         if (ret)
1048                 return ret;
1049
1050         ret = intel_ring_begin(req, w->count * 2 + 2);
1051         if (ret)
1052                 return ret;
1053
1054         intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1055         for (i = 0; i < w->count; i++) {
1056                 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
1057                 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1058         }
1059         intel_logical_ring_emit(ringbuf, MI_NOOP);
1060
1061         intel_logical_ring_advance(ringbuf);
1062
1063         engine->gpu_caches_dirty = true;
1064         ret = logical_ring_flush_all_caches(req);
1065         if (ret)
1066                 return ret;
1067
1068         return 0;
1069 }
1070
1071 #define wa_ctx_emit(batch, index, cmd)                                  \
1072         do {                                                            \
1073                 int __index = (index)++;                                \
1074                 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
1075                         return -ENOSPC;                                 \
1076                 }                                                       \
1077                 batch[__index] = (cmd);                                 \
1078         } while (0)
1079
1080 #define wa_ctx_emit_reg(batch, index, reg) \
1081         wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
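
/*
 * Typical use (mirroring the helpers below): each wa_ctx_emit() stores one
 * dword into the workaround batch page and advances 'index', e.g.
 *
 *	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 *	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 *	wa_ctx_emit(batch, index, l3sqc4_flush);
 */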
1082
1083 /*
1084  * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
1085  * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1086  * but there is a slight complication as this is applied in WA batch where the
1087  * values are only initialized once so we cannot take register value at the
1088  * beginning and reuse it further; hence we save its value to memory, upload a
1089  * constant value with bit21 set and then we restore it back with the saved value.
1090  * To simplify the WA, a constant value is formed by using the default value
1091  * of this register. This shouldn't be a problem because we are only modifying
1092  * it for a short period and this batch is non-preemptible. We can of course
1093  * use additional instructions that read the actual value of the register
1094  * at that time and set our bit of interest but it makes the WA complicated.
1095  *
1096  * This WA is also required for Gen9 so extracting as a function avoids
1097  * code duplication.
1098  */
1099 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1100                                                 uint32_t *const batch,
1101                                                 uint32_t index)
1102 {
1103         uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1104
1105         /*
1106          * WaDisableLSQCROPERFforOCL:skl,kbl
1107          * This WA is implemented in skl_init_clock_gating() but since
1108          * this batch updates GEN8_L3SQCREG4 with default value we need to
1109          * set this bit here to retain the WA during flush.
1110          */
1111         if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
1112             IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
1113                 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1114
1115         wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1116                                    MI_SRM_LRM_GLOBAL_GTT));
1117         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1118         wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1119         wa_ctx_emit(batch, index, 0);
1120
1121         wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1122         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1123         wa_ctx_emit(batch, index, l3sqc4_flush);
1124
1125         wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1126         wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1127                                    PIPE_CONTROL_DC_FLUSH_ENABLE));
1128         wa_ctx_emit(batch, index, 0);
1129         wa_ctx_emit(batch, index, 0);
1130         wa_ctx_emit(batch, index, 0);
1131         wa_ctx_emit(batch, index, 0);
1132
1133         wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1134                                    MI_SRM_LRM_GLOBAL_GTT));
1135         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1136         wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1137         wa_ctx_emit(batch, index, 0);
1138
1139         return index;
1140 }
1141
1142 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1143                                     uint32_t offset,
1144                                     uint32_t start_alignment)
1145 {
1146         return wa_ctx->offset = ALIGN(offset, start_alignment);
1147 }
1148
1149 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1150                              uint32_t offset,
1151                              uint32_t size_alignment)
1152 {
1153         wa_ctx->size = offset - wa_ctx->offset;
1154
1155         WARN(wa_ctx->size % size_alignment,
1156              "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1157              wa_ctx->size, size_alignment);
1158         return 0;
1159 }
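
/*
 * The two helpers above bracket each workaround batch. A typical caller
 * (see gen8_init_indirectctx_bb() below) does, roughly:
 *
 *	index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 *	... wa_ctx_emit(batch, index, ...) ...
 *	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
 */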
1160
1161 /**
1162  * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1163  *
1164  * @engine: only applicable for RCS
1165  * @wa_ctx: structure representing wa_ctx
1166  *  offset: specifies start of the batch, should be cache-aligned. This is updated
1167  *    with the offset value received as input.
1168  *  size: size of the batch in DWORDS but HW expects in terms of cachelines
1169  * @batch: page in which WA are loaded
1170  * @offset: This field specifies the start of the batch; it should be
1171  *  cache-aligned, otherwise it is adjusted accordingly.
1172  *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
1173  *  initialized at the beginning and shared across all contexts but this field
1174  *  helps us to have multiple batches at different offsets and select them based
1175  *  on some criteria. At the moment this batch always starts at the beginning of the page
1176  *  and at this point we don't have multiple wa_ctx batch buffers.
1177  *
1178  *  The number of WA applied is not known at the beginning; we use this field
1179  *  to return the number of DWORDS written.
1180  *
1181  *  It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1182  *  so it adds NOOPs as padding to make it cacheline aligned.
1183  *  MI_BATCH_BUFFER_END will be added to the perctx batch and both of them together
1184  *  make a complete batch buffer.
1185  *
1186  * Return: non-zero if we exceed the PAGE_SIZE limit.
1187  */
1188
1189 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1190                                     struct i915_wa_ctx_bb *wa_ctx,
1191                                     uint32_t *const batch,
1192                                     uint32_t *offset)
1193 {
1194         uint32_t scratch_addr;
1195         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1196
1197         /* WaDisableCtxRestoreArbitration:bdw,chv */
1198         wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1199
1200         /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1201         if (IS_BROADWELL(engine->i915)) {
1202                 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1203                 if (rc < 0)
1204                         return rc;
1205                 index = rc;
1206         }
1207
1208         /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1209         /* Actual scratch location is at 128 bytes offset */
1210         scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1211
1212         wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1213         wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1214                                    PIPE_CONTROL_GLOBAL_GTT_IVB |
1215                                    PIPE_CONTROL_CS_STALL |
1216                                    PIPE_CONTROL_QW_WRITE));
1217         wa_ctx_emit(batch, index, scratch_addr);
1218         wa_ctx_emit(batch, index, 0);
1219         wa_ctx_emit(batch, index, 0);
1220         wa_ctx_emit(batch, index, 0);
1221
1222         /* Pad to end of cacheline */
1223         while (index % CACHELINE_DWORDS)
1224                 wa_ctx_emit(batch, index, MI_NOOP);
1225
1226         /*
1227          * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1228          * execution depends on the length specified in terms of cache lines
1229          * in the register CTX_RCS_INDIRECT_CTX
1230          */
1231
1232         return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1233 }
1234
1235 /**
1236  * gen8_init_perctx_bb() - initialize per ctx batch with WA
1237  *
1238  * @engine: only applicable for RCS
1239  * @wa_ctx: structure representing wa_ctx
1240  *  offset: specifies start of the batch, should be cache-aligned.
1241  *  size: size of the batch in DWORDS but HW expects in terms of cachelines
1242  * @batch: page in which WA are loaded
1243  * @offset: This field specifies the start of this batch.
1244  *   This batch is started immediately after indirect_ctx batch. Since we ensure
1245  *   that indirect_ctx ends on a cacheline this batch is aligned automatically.
1246  *
1247  *   The number of DWORDS written is returned using this field.
1248  *
1249  *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1250  *  to align it to a cacheline, as padding after MI_BATCH_BUFFER_END is redundant.
1251  */
1252 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
1253                                struct i915_wa_ctx_bb *wa_ctx,
1254                                uint32_t *const batch,
1255                                uint32_t *offset)
1256 {
1257         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1258
1259         /* WaDisableCtxRestoreArbitration:bdw,chv */
1260         wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1261
1262         wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1263
1264         return wa_ctx_end(wa_ctx, *offset = index, 1);
1265 }
1266
1267 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1268                                     struct i915_wa_ctx_bb *wa_ctx,
1269                                     uint32_t *const batch,
1270                                     uint32_t *offset)
1271 {
1272         int ret;
1273         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1274
1275         /* WaDisableCtxRestoreArbitration:skl,bxt */
1276         if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1277             IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1278                 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1279
1280         /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1281         ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1282         if (ret < 0)
1283                 return ret;
1284         index = ret;
1285
1286         /* WaClearSlmSpaceAtContextSwitch:kbl */
1287         /* Actual scratch location is at 128 bytes offset */
1288         if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
1289                 uint32_t scratch_addr
1290                         = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1291
1292                 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1293                 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1294                                            PIPE_CONTROL_GLOBAL_GTT_IVB |
1295                                            PIPE_CONTROL_CS_STALL |
1296                                            PIPE_CONTROL_QW_WRITE));
1297                 wa_ctx_emit(batch, index, scratch_addr);
1298                 wa_ctx_emit(batch, index, 0);
1299                 wa_ctx_emit(batch, index, 0);
1300                 wa_ctx_emit(batch, index, 0);
1301         }
1302         /* Pad to end of cacheline */
1303         while (index % CACHELINE_DWORDS)
1304                 wa_ctx_emit(batch, index, MI_NOOP);
1305
1306         return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1307 }
1308
1309 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1310                                struct i915_wa_ctx_bb *wa_ctx,
1311                                uint32_t *const batch,
1312                                uint32_t *offset)
1313 {
1314         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1315
1316         /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1317         if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1318             IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1319                 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1320                 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1321                 wa_ctx_emit(batch, index,
1322                             _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1323                 wa_ctx_emit(batch, index, MI_NOOP);
1324         }
1325
1326         /* WaClearTdlStateAckDirtyBits:bxt */
1327         if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1328                 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1329
1330                 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1331                 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1332
1333                 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1334                 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1335
1336                 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1337                 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1338
1339                 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1340                 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1341                 wa_ctx_emit(batch, index, 0x0);
1342                 wa_ctx_emit(batch, index, MI_NOOP);
1343         }
1344
1345         /* WaDisableCtxRestoreArbitration:skl,bxt */
1346         if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1347             IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1348                 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1349
1350         wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1351
1352         return wa_ctx_end(wa_ctx, *offset = index, 1);
1353 }
1354
1355 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1356 {
1357         int ret;
1358
1359         engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
1360                                                    PAGE_ALIGN(size));
1361         if (IS_ERR(engine->wa_ctx.obj)) {
1362                 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1363                 ret = PTR_ERR(engine->wa_ctx.obj);
1364                 engine->wa_ctx.obj = NULL;
1365                 return ret;
1366         }
1367
1368         ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
1369         if (ret) {
1370                 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1371                                  ret);
1372                 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1373                 return ret;
1374         }
1375
1376         return 0;
1377 }
1378
1379 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1380 {
1381         if (engine->wa_ctx.obj) {
1382                 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1383                 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1384                 engine->wa_ctx.obj = NULL;
1385         }
1386 }
1387
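/*
 * Allocate and fill the per-context workaround batch buffers (render engine
 * only): a single GGTT-pinned page is mapped and populated with the
 * Gen8/Gen9 indirect-ctx and per-ctx batches, and torn down again if any of
 * the emitters fail.
 */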
1388 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1389 {
1390         int ret;
1391         uint32_t *batch;
1392         uint32_t offset;
1393         struct page *page;
1394         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1395
1396         WARN_ON(engine->id != RCS);
1397
1398         /* update this when WAs for higher Gens are added */
1399         if (INTEL_GEN(engine->i915) > 9) {
1400                 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1401                           INTEL_GEN(engine->i915));
1402                 return 0;
1403         }
1404
1405         /* some WAs perform writes to the scratch page, ensure it is valid */
1406         if (engine->scratch.obj == NULL) {
1407                 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1408                 return -EINVAL;
1409         }
1410
1411         ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1412         if (ret) {
1413                 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1414                 return ret;
1415         }
1416
1417         page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
1418         batch = kmap_atomic(page);
1419         offset = 0;
1420
1421         if (IS_GEN8(engine->i915)) {
1422                 ret = gen8_init_indirectctx_bb(engine,
1423                                                &wa_ctx->indirect_ctx,
1424                                                batch,
1425                                                &offset);
1426                 if (ret)
1427                         goto out;
1428
1429                 ret = gen8_init_perctx_bb(engine,
1430                                           &wa_ctx->per_ctx,
1431                                           batch,
1432                                           &offset);
1433                 if (ret)
1434                         goto out;
1435         } else if (IS_GEN9(engine->i915)) {
1436                 ret = gen9_init_indirectctx_bb(engine,
1437                                                &wa_ctx->indirect_ctx,
1438                                                batch,
1439                                                &offset);
1440                 if (ret)
1441                         goto out;
1442
1443                 ret = gen9_init_perctx_bb(engine,
1444                                           &wa_ctx->per_ctx,
1445                                           batch,
1446                                           &offset);
1447                 if (ret)
1448                         goto out;
1449         }
1450
1451 out:
1452         kunmap_atomic(batch);
1453         if (ret)
1454                 lrc_destroy_wa_ctx_obj(engine);
1455
1456         return ret;
1457 }
1458
1459 static void lrc_init_hws(struct intel_engine_cs *engine)
1460 {
1461         struct drm_i915_private *dev_priv = engine->i915;
1462
1463         I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1464                    (u32)engine->status_page.gfx_addr);
1465         POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1466 }
1467
1468 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1469 {
1470         struct drm_i915_private *dev_priv = engine->i915;
1471         unsigned int next_context_status_buffer_hw;
1472
1473         lrc_init_hws(engine);
1474
1475         I915_WRITE_IMR(engine,
1476                        ~(engine->irq_enable_mask | engine->irq_keep_mask));
1477         I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1478
1479         I915_WRITE(RING_MODE_GEN7(engine),
1480                    _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1481                    _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1482         POSTING_READ(RING_MODE_GEN7(engine));
1483
1484         /*
1485          * Instead of resetting the Context Status Buffer (CSB) read pointer to
1486          * zero, we need to read the write pointer from hardware and use its
1487          * value because "this register is power context save restored".
1488          * Effectively, these states have been observed:
1489          *
1490          *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1491          * BDW  | CSB regs not reset       | CSB regs reset       |
1492          * CHT  | CSB regs not reset       | CSB regs not reset   |
1493          * SKL  |         ?                |         ?            |
1494          * BXT  |         ?                |         ?            |
1495          */
1496         next_context_status_buffer_hw =
1497                 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
1498
1499         /*
1500          * When the CSB registers are reset (also after power-up / gpu reset),
1501          * the CSB write pointer reads as all 1's, which is not valid; use '5'
1502          * in this special case so that the first element read is CSB[0].
1503          */
1504         if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1505                 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1506
1507         engine->next_context_status_buffer = next_context_status_buffer_hw;
1508         DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1509
1510         intel_engine_init_hangcheck(engine);
1511
1512         return intel_mocs_init_engine(engine);
1513 }
1514
1515 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1516 {
1517         struct drm_i915_private *dev_priv = engine->i915;
1518         int ret;
1519
1520         ret = gen8_init_common_ring(engine);
1521         if (ret)
1522                 return ret;
1523
1524         /* We need to disable the AsyncFlip performance optimisations in order
1525          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1526          * programmed to '1' on all products.
1527          *
1528          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1529          */
1530         I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1531
1532         I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1533
1534         return init_workarounds_ring(engine);
1535 }
1536
1537 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1538 {
1539         int ret;
1540
1541         ret = gen8_init_common_ring(engine);
1542         if (ret)
1543                 return ret;
1544
1545         return init_workarounds_ring(engine);
1546 }
1547
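/*
 * Emit one MI_LOAD_REGISTER_IMM that reloads the upper and lower dwords of
 * all four GEN8_RING_PDP registers from the request's PPGTT. Used by
 * gen8_emit_bb_start() below when the hw cannot be relied upon to restore
 * the PDPs itself.
 */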
1548 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1549 {
1550         struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1551         struct intel_engine_cs *engine = req->engine;
1552         struct intel_ringbuffer *ringbuf = req->ringbuf;
1553         const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1554         int i, ret;
1555
1556         ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1557         if (ret)
1558                 return ret;
1559
1560         intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1561         for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1562                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1563
1564                 intel_logical_ring_emit_reg(ringbuf,
1565                                             GEN8_RING_PDP_UDW(engine, i));
1566                 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
1567                 intel_logical_ring_emit_reg(ringbuf,
1568                                             GEN8_RING_PDP_LDW(engine, i));
1569                 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1570         }
1571
1572         intel_logical_ring_emit(ringbuf, MI_NOOP);
1573         intel_logical_ring_advance(ringbuf);
1574
1575         return 0;
1576 }
1577
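/*
 * Emit the MI_BATCH_BUFFER_START for a request. If the engine's page
 * directories are marked dirty (and we are neither using full 48-bit PPGTT
 * nor running under a vGPU), the PDP registers are re-emitted first. The
 * PPGTT bit is derived from the absence of I915_DISPATCH_SECURE.
 */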
1578 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1579                               u64 offset, unsigned dispatch_flags)
1580 {
1581         struct intel_ringbuffer *ringbuf = req->ringbuf;
1582         bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1583         int ret;
1584
1585         /* Don't rely on the hw updating the PDPs, especially in lite-restore.
1586          * Ideally, we should set Force PD Restore in the ctx descriptor,
1587          * but we can't. Force Restore would be a second option, but
1588          * it is unsafe in case of lite-restore (because the ctx is
1589          * not idle). PML4 is allocated during ppgtt init, so this is
1590          * not needed in 48-bit mode. */
1591         if (req->ctx->ppgtt &&
1592             (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1593                 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1594                     !intel_vgpu_active(req->i915)) {
1595                         ret = intel_logical_ring_emit_pdps(req);
1596                         if (ret)
1597                                 return ret;
1598                 }
1599
1600                 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1601         }
1602
1603         ret = intel_ring_begin(req, 4);
1604         if (ret)
1605                 return ret;
1606
1607         /* FIXME(BDW): Address space and security selectors. */
1608         intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1609                                 (ppgtt<<8) |
1610                                 (dispatch_flags & I915_DISPATCH_RS ?
1611                                  MI_BATCH_RESOURCE_STREAMER : 0));
1612         intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1613         intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1614         intel_logical_ring_emit(ringbuf, MI_NOOP);
1615         intel_logical_ring_advance(ringbuf);
1616
1617         return 0;
1618 }
1619
1620 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1621 {
1622         struct drm_i915_private *dev_priv = engine->i915;
1623         unsigned long flags;
1624
1625         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1626                 return false;
1627
1628         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1629         if (engine->irq_refcount++ == 0) {
1630                 I915_WRITE_IMR(engine,
1631                                ~(engine->irq_enable_mask | engine->irq_keep_mask));
1632                 POSTING_READ(RING_IMR(engine->mmio_base));
1633         }
1634         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1635
1636         return true;
1637 }
1638
1639 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1640 {
1641         struct drm_i915_private *dev_priv = engine->i915;
1642         unsigned long flags;
1643
1644         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1645         if (--engine->irq_refcount == 0) {
1646                 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1647                 POSTING_READ(RING_IMR(engine->mmio_base));
1648         }
1649         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1650 }
1651
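/*
 * Flush for the non-render engines: a single MI_FLUSH_DW with a dummy
 * post-sync write to the HWS scratch slot (the command barrier), plus TLB
 * invalidation (and BSD invalidation on VCS) when GPU domains are being
 * invalidated.
 */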
1652 static int gen8_emit_flush(struct drm_i915_gem_request *request,
1653                            u32 invalidate_domains,
1654                            u32 unused)
1655 {
1656         struct intel_ringbuffer *ringbuf = request->ringbuf;
1657         struct intel_engine_cs *engine = ringbuf->engine;
1658         struct drm_i915_private *dev_priv = request->i915;
1659         uint32_t cmd;
1660         int ret;
1661
1662         ret = intel_ring_begin(request, 4);
1663         if (ret)
1664                 return ret;
1665
1666         cmd = MI_FLUSH_DW + 1;
1667
1668         /* We always require a command barrier so that subsequent
1669          * commands, such as breadcrumb interrupts, are strictly ordered
1670          * wrt the contents of the write cache being flushed to memory
1671          * (and thus being coherent from the CPU).
1672          */
1673         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1674
1675         if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1676                 cmd |= MI_INVALIDATE_TLB;
1677                 if (engine == &dev_priv->engine[VCS])
1678                         cmd |= MI_INVALIDATE_BSD;
1679         }
1680
1681         intel_logical_ring_emit(ringbuf, cmd);
1682         intel_logical_ring_emit(ringbuf,
1683                                 I915_GEM_HWS_SCRATCH_ADDR |
1684                                 MI_FLUSH_DW_USE_GTT);
1685         intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1686         intel_logical_ring_emit(ringbuf, 0); /* value */
1687         intel_logical_ring_advance(ringbuf);
1688
1689         return 0;
1690 }
1691
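/*
 * Flush for the render engine, built from PIPE_CONTROLs: flush_domains
 * selects the render-target/depth/DC cache flushes, invalidate_domains the
 * TLB and read-cache invalidations. Gen9 needs a preceding NULL PIPE_CONTROL
 * before VF_CACHE_INVALIDATE, and early KBL needs extra DC-flush/CS-stall
 * PIPE_CONTROLs around the main one (WaForGAMHang).
 */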
1692 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1693                                   u32 invalidate_domains,
1694                                   u32 flush_domains)
1695 {
1696         struct intel_ringbuffer *ringbuf = request->ringbuf;
1697         struct intel_engine_cs *engine = ringbuf->engine;
1698         u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1699         bool vf_flush_wa = false, dc_flush_wa = false;
1700         u32 flags = 0;
1701         int ret;
1702         int len;
1703
1704         flags |= PIPE_CONTROL_CS_STALL;
1705
1706         if (flush_domains) {
1707                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1708                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1709                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1710                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1711         }
1712
1713         if (invalidate_domains) {
1714                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1715                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1716                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1717                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1718                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1719                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1720                 flags |= PIPE_CONTROL_QW_WRITE;
1721                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1722
1723                 /*
1724                  * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1725                  * pipe control.
1726                  */
1727                 if (IS_GEN9(request->i915))
1728                         vf_flush_wa = true;
1729
1730                 /* WaForGAMHang:kbl */
1731                 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1732                         dc_flush_wa = true;
1733         }
1734
1735         len = 6;
1736
1737         if (vf_flush_wa)
1738                 len += 6;
1739
1740         if (dc_flush_wa)
1741                 len += 12;
1742
1743         ret = intel_ring_begin(request, len);
1744         if (ret)
1745                 return ret;
1746
1747         if (vf_flush_wa) {
1748                 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1749                 intel_logical_ring_emit(ringbuf, 0);
1750                 intel_logical_ring_emit(ringbuf, 0);
1751                 intel_logical_ring_emit(ringbuf, 0);
1752                 intel_logical_ring_emit(ringbuf, 0);
1753                 intel_logical_ring_emit(ringbuf, 0);
1754         }
1755
1756         if (dc_flush_wa) {
1757                 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1758                 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
1759                 intel_logical_ring_emit(ringbuf, 0);
1760                 intel_logical_ring_emit(ringbuf, 0);
1761                 intel_logical_ring_emit(ringbuf, 0);
1762                 intel_logical_ring_emit(ringbuf, 0);
1763         }
1764
1765         intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1766         intel_logical_ring_emit(ringbuf, flags);
1767         intel_logical_ring_emit(ringbuf, scratch_addr);
1768         intel_logical_ring_emit(ringbuf, 0);
1769         intel_logical_ring_emit(ringbuf, 0);
1770         intel_logical_ring_emit(ringbuf, 0);
1771
1772         if (dc_flush_wa) {
1773                 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1774                 intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
1775                 intel_logical_ring_emit(ringbuf, 0);
1776                 intel_logical_ring_emit(ringbuf, 0);
1777                 intel_logical_ring_emit(ringbuf, 0);
1778                 intel_logical_ring_emit(ringbuf, 0);
1779         }
1780
1781         intel_logical_ring_advance(ringbuf);
1782
1783         return 0;
1784 }
1785
1786 static u32 gen8_get_seqno(struct intel_engine_cs *engine)
1787 {
1788         return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1789 }
1790
1791 static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1792 {
1793         intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1794 }
1795
1796 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1797 {
1798         /*
1799          * On BXT A steppings there is a HW coherency issue whereby the
1800          * MI_STORE_DATA_IMM storing the completed request's seqno
1801          * occasionally doesn't invalidate the CPU cache. Work around this by
1802          * clflushing the corresponding cacheline whenever the caller wants
1803          * the coherency to be guaranteed. Note that this cacheline is known
1804          * to be clean at this point, since we only write it in
1805          * bxt_a_set_seqno(), where we also do a clflush after the write. So
1806          * this clflush in practice becomes an invalidate operation.
1807          */
1808         intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1809 }
1810
1811 static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1812 {
1813         intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1814
1815         /* See bxt_a_seqno_barrier() for the reason for this clflush. */
1816         intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1817 }
1818
1819 /*
1820  * Reserve space for 2 NOOPs at the end of each request to be
1821  * used as a workaround for not being allowed to do lite
1822  * restore with HEAD==TAIL (WaIdleLiteRestore).
1823  */
1824 #define WA_TAIL_DWORDS 2
1825
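/*
 * Close out a request: write its seqno into the HWS with an MI_FLUSH_DW
 * post-sync write and raise MI_USER_INTERRUPT. intel_ring_begin() reserves
 * the extra WA_TAIL_DWORDS for the WaIdleLiteRestore padding described
 * above.
 */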
1826 static int gen8_emit_request(struct drm_i915_gem_request *request)
1827 {
1828         struct intel_ringbuffer *ringbuf = request->ringbuf;
1829         int ret;
1830
1831         ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
1832         if (ret)
1833                 return ret;
1834
1835         /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1836         BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1837
1838         intel_logical_ring_emit(ringbuf,
1839                                 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1840         intel_logical_ring_emit(ringbuf,
1841                                 intel_hws_seqno_address(request->engine) |
1842                                 MI_FLUSH_DW_USE_GTT);
1843         intel_logical_ring_emit(ringbuf, 0);
1844         intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1845         intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1846         intel_logical_ring_emit(ringbuf, MI_NOOP);
1847         return intel_logical_ring_advance_and_submit(request);
1848 }
1849
1850 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1851 {
1852         struct intel_ringbuffer *ringbuf = request->ringbuf;
1853         int ret;
1854
1855         ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
1856         if (ret)
1857                 return ret;
1858
1859         /* We're using qword write, seqno should be aligned to 8 bytes. */
1860         BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1861
1862         /* w/a: post-sync ops following a GPGPU operation need a
1863          * prior CS_STALL, which is emitted by the flush
1864          * following the batch.
1865          */
1866         intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1867         intel_logical_ring_emit(ringbuf,
1868                                 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1869                                  PIPE_CONTROL_CS_STALL |
1870                                  PIPE_CONTROL_QW_WRITE));
1871         intel_logical_ring_emit(ringbuf,
1872                                 intel_hws_seqno_address(request->engine));
1873         intel_logical_ring_emit(ringbuf, 0);
1874         intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1875         /* We're thrashing one dword of HWS. */
1876         intel_logical_ring_emit(ringbuf, 0);
1877         intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1878         intel_logical_ring_emit(ringbuf, MI_NOOP);
1879         return intel_logical_ring_advance_and_submit(request);
1880 }
1881
1882 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
1883 {
1884         struct render_state so;
1885         int ret;
1886
1887         ret = i915_gem_render_state_prepare(req->engine, &so);
1888         if (ret)
1889                 return ret;
1890
1891         if (so.rodata == NULL)
1892                 return 0;
1893
1894         ret = req->engine->emit_bb_start(req, so.ggtt_offset,
1895                                        I915_DISPATCH_SECURE);
1896         if (ret)
1897                 goto out;
1898
1899         ret = req->engine->emit_bb_start(req,
1900                                        (so.ggtt_offset + so.aux_batch_offset),
1901                                        I915_DISPATCH_SECURE);
1902         if (ret)
1903                 goto out;
1904
1905         i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
1906
1907 out:
1908         i915_gem_render_state_fini(&so);
1909         return ret;
1910 }
1911
1912 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1913 {
1914         int ret;
1915
1916         ret = intel_logical_ring_workarounds_emit(req);
1917         if (ret)
1918                 return ret;
1919
1920         ret = intel_rcs_context_init_mocs(req);
1921         /*
1922          * Failing to program the MOCS is non-fatal. The system will not
1923          * run at peak performance, so generate an error and carry on.
1924          */
1925         if (ret)
1926                 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1927
1928         return intel_lr_context_render_state_init(req);
1929 }
1930
1931 /**
1932  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1933  *
1934  * @engine: Engine Command Streamer.
1935  *
1936  */
1937 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1938 {
1939         struct drm_i915_private *dev_priv;
1940
1941         if (!intel_engine_initialized(engine))
1942                 return;
1943
1944         /*
1945          * The tasklet cannot be active at this point due to intel_mark_active/idle,
1946          * so this is just for documentation.
1947          */
1948         if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1949                 tasklet_kill(&engine->irq_tasklet);
1950
1951         dev_priv = engine->i915;
1952
1953         if (engine->buffer) {
1954                 intel_logical_ring_stop(engine);
1955                 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1956         }
1957
1958         if (engine->cleanup)
1959                 engine->cleanup(engine);
1960
1961         i915_cmd_parser_fini_ring(engine);
1962         i915_gem_batch_pool_fini(&engine->batch_pool);
1963
1964         if (engine->status_page.obj) {
1965                 i915_gem_object_unpin_map(engine->status_page.obj);
1966                 engine->status_page.obj = NULL;
1967         }
1968         intel_lr_context_unpin(dev_priv->kernel_context, engine);
1969
1970         engine->idle_lite_restore_wa = 0;
1971         engine->disable_lite_restore_wa = false;
1972         engine->ctx_desc_template = 0;
1973
1974         lrc_destroy_wa_ctx_obj(engine);
1975         engine->i915 = NULL;
1976 }
1977
1978 static void
1979 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1980 {
1981         /* Default vfuncs which can be overridden by each engine. */
1982         engine->init_hw = gen8_init_common_ring;
1983         engine->emit_request = gen8_emit_request;
1984         engine->emit_flush = gen8_emit_flush;
1985         engine->irq_get = gen8_logical_ring_get_irq;
1986         engine->irq_put = gen8_logical_ring_put_irq;
1987         engine->emit_bb_start = gen8_emit_bb_start;
1988         engine->get_seqno = gen8_get_seqno;
1989         engine->set_seqno = gen8_set_seqno;
1990         if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1991                 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
1992                 engine->set_seqno = bxt_a_set_seqno;
1993         }
1994 }
1995
1996 static inline void
1997 logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
1998 {
1999         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2000         engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2001         init_waitqueue_head(&engine->irq_queue);
2002 }
2003
2004 static int
2005 lrc_setup_hws(struct intel_engine_cs *engine,
2006               struct drm_i915_gem_object *dctx_obj)
2007 {
2008         void *hws;
2009
2010         /* The HWSP is part of the default context object in LRC mode. */
2011         engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
2012                                        LRC_PPHWSP_PN * PAGE_SIZE;
2013         hws = i915_gem_object_pin_map(dctx_obj);
2014         if (IS_ERR(hws))
2015                 return PTR_ERR(hws);
2016         engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
2017         engine->status_page.obj = dctx_obj;
2018
2019         return 0;
2020 }
2021
2022 static int
2023 logical_ring_init(struct intel_engine_cs *engine)
2024 {
2025         struct i915_gem_context *dctx = engine->i915->kernel_context;
2026         int ret;
2027
2028         ret = i915_cmd_parser_init_ring(engine);
2029         if (ret)
2030                 goto error;
2031
2032         ret = execlists_context_deferred_alloc(dctx, engine);
2033         if (ret)
2034                 goto error;
2035
2036         /* As this is the default context, always pin it */
2037         ret = intel_lr_context_pin(dctx, engine);
2038         if (ret) {
2039                 DRM_ERROR("Failed to pin context for %s: %d\n",
2040                           engine->name, ret);
2041                 goto error;
2042         }
2043
2044         /* And setup the hardware status page. */
2045         ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2046         if (ret) {
2047                 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2048                 goto error;
2049         }
2050
2051         return 0;
2052
2053 error:
2054         intel_logical_ring_cleanup(engine);
2055         return ret;
2056 }
2057
2058 static int logical_render_ring_init(struct intel_engine_cs *engine)
2059 {
2060         struct drm_i915_private *dev_priv = engine->i915;
2061         int ret;
2062
2063         if (HAS_L3_DPF(dev_priv))
2064                 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2065
2066         /* Override some for render ring. */
2067         if (INTEL_GEN(dev_priv) >= 9)
2068                 engine->init_hw = gen9_init_render_ring;
2069         else
2070                 engine->init_hw = gen8_init_render_ring;
2071         engine->init_context = gen8_init_rcs_context;
2072         engine->cleanup = intel_fini_pipe_control;
2073         engine->emit_flush = gen8_emit_flush_render;
2074         engine->emit_request = gen8_emit_request_render;
2075
2076         ret = intel_init_pipe_control(engine);
2077         if (ret)
2078                 return ret;
2079
2080         ret = intel_init_workaround_bb(engine);
2081         if (ret) {
2082                 /*
2083                  * We continue even if we fail to initialize the WA batch
2084                  * because we only expect rare glitches, but nothing
2085                  * critical enough to prevent us from using the GPU.
2086                  */
2087                 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2088                           ret);
2089         }
2090
2091         ret = logical_ring_init(engine);
2092         if (ret)
2093                 lrc_destroy_wa_ctx_obj(engine);
2095
2096         return ret;
2097 }
2098
2099 static const struct logical_ring_info {
2100         const char *name;
2101         unsigned exec_id;
2102         unsigned guc_id;
2103         u32 mmio_base;
2104         unsigned irq_shift;
2105         int (*init)(struct intel_engine_cs *engine);
2106 } logical_rings[] = {
2107         [RCS] = {
2108                 .name = "render ring",
2109                 .exec_id = I915_EXEC_RENDER,
2110                 .guc_id = GUC_RENDER_ENGINE,
2111                 .mmio_base = RENDER_RING_BASE,
2112                 .irq_shift = GEN8_RCS_IRQ_SHIFT,
2113                 .init = logical_render_ring_init,
2114         },
2115         [BCS] = {
2116                 .name = "blitter ring",
2117                 .exec_id = I915_EXEC_BLT,
2118                 .guc_id = GUC_BLITTER_ENGINE,
2119                 .mmio_base = BLT_RING_BASE,
2120                 .irq_shift = GEN8_BCS_IRQ_SHIFT,
2121                 .init = logical_ring_init,
2122         },
2123         [VCS] = {
2124                 .name = "bsd ring",
2125                 .exec_id = I915_EXEC_BSD,
2126                 .guc_id = GUC_VIDEO_ENGINE,
2127                 .mmio_base = GEN6_BSD_RING_BASE,
2128                 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
2129                 .init = logical_ring_init,
2130         },
2131         [VCS2] = {
2132                 .name = "bsd2 ring",
2133                 .exec_id = I915_EXEC_BSD,
2134                 .guc_id = GUC_VIDEO_ENGINE2,
2135                 .mmio_base = GEN8_BSD2_RING_BASE,
2136                 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
2137                 .init = logical_ring_init,
2138         },
2139         [VECS] = {
2140                 .name = "video enhancement ring",
2141                 .exec_id = I915_EXEC_VEBOX,
2142                 .guc_id = GUC_VIDEOENHANCE_ENGINE,
2143                 .mmio_base = VEBOX_RING_BASE,
2144                 .irq_shift = GEN8_VECS_IRQ_SHIFT,
2145                 .init = logical_ring_init,
2146         },
2147 };
2148
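/*
 * Fill in an engine structure from the logical_rings[] table above: name,
 * exec/GuC ids and mmio base, the forcewake domains needed to touch the
 * ELSP and CSB registers, the execlist queue and tasklet, and the default
 * vfuncs and interrupt masks.
 */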
2149 static struct intel_engine_cs *
2150 logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
2151 {
2152         const struct logical_ring_info *info = &logical_rings[id];
2153         struct intel_engine_cs *engine = &dev_priv->engine[id];
2154         enum forcewake_domains fw_domains;
2155
2156         engine->id = id;
2157         engine->name = info->name;
2158         engine->exec_id = info->exec_id;
2159         engine->guc_id = info->guc_id;
2160         engine->mmio_base = info->mmio_base;
2161
2162         engine->i915 = dev_priv;
2163
2164         /* Intentionally left blank. */
2165         engine->buffer = NULL;
2166
2167         fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2168                                                     RING_ELSP(engine),
2169                                                     FW_REG_WRITE);
2170
2171         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2172                                                      RING_CONTEXT_STATUS_PTR(engine),
2173                                                      FW_REG_READ | FW_REG_WRITE);
2174
2175         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2176                                                      RING_CONTEXT_STATUS_BUF_BASE(engine),
2177                                                      FW_REG_READ);
2178
2179         engine->fw_domains = fw_domains;
2180
2181         INIT_LIST_HEAD(&engine->active_list);
2182         INIT_LIST_HEAD(&engine->request_list);
2183         INIT_LIST_HEAD(&engine->buffers);
2184         INIT_LIST_HEAD(&engine->execlist_queue);
2185         spin_lock_init(&engine->execlist_lock);
2186
2187         tasklet_init(&engine->irq_tasklet,
2188                      intel_lrc_irq_handler, (unsigned long)engine);
2189
2190         logical_ring_init_platform_invariants(engine);
2191         logical_ring_default_vfuncs(engine);
2192         logical_ring_default_irqs(engine, info->irq_shift);
2193
2194         intel_engine_init_hangcheck(engine);
2195         i915_gem_batch_pool_init(dev_priv->dev, &engine->batch_pool);
2196
2197         return engine;
2198 }
2199
2200 /**
2201  * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2202  * @dev: DRM device.
2203  *
2204  * This function inits the engines for an Execlists submission style (the
2205  * equivalent in the legacy ringbuffer submission world would be
2206  * i915_gem_init_engines). It does so only for those engines that are present in
2207  * the hardware.
2208  *
2209  * Return: non-zero if the initialization failed.
2210  */
2211 int intel_logical_rings_init(struct drm_device *dev)
2212 {
2213         struct drm_i915_private *dev_priv = dev->dev_private;
2214         unsigned int mask = 0;
2215         unsigned int i;
2216         int ret;
2217
2218         WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
2219                 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
2220
2221         for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
2222                 if (!HAS_ENGINE(dev_priv, i))
2223                         continue;
2224
2225                 if (!logical_rings[i].init)
2226                         continue;
2227
2228                 ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
2229                 if (ret)
2230                         goto cleanup;
2231
2232                 mask |= ENGINE_MASK(i);
2233         }
2234
2235         /*
2236          * Catch failures to update the logical_rings table when new engines
2237          * are added to the driver: warn about and disable the forgotten
2238          * engines.
2239          */
2240         if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
2241                 struct intel_device_info *info =
2242                         (struct intel_device_info *)&dev_priv->info;
2243                 info->ring_mask = mask;
2244         }
2245
2246         return 0;
2247
2248 cleanup:
2249         for (i = 0; i < I915_NUM_ENGINES; i++)
2250                 intel_logical_ring_cleanup(&dev_priv->engine[i]);
2251
2252         return ret;
2253 }
2254
2255 static u32
2256 make_rpcs(struct drm_i915_private *dev_priv)
2257 {
2258         u32 rpcs = 0;
2259
2260         /*
2261          * No explicit RPCS request is needed to ensure full
2262          * slice/subslice/EU enablement prior to Gen9.
2263          */
2264         if (INTEL_GEN(dev_priv) < 9)
2265                 return 0;
2266
2267         /*
2268          * Starting in Gen9, render power gating can leave
2269          * slice/subslice/EU in a partially enabled state. We
2270          * must make an explicit request through RPCS for full
2271          * enablement.
2272          */
2273         if (INTEL_INFO(dev_priv)->has_slice_pg) {
2274                 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2275                 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
2276                         GEN8_RPCS_S_CNT_SHIFT;
2277                 rpcs |= GEN8_RPCS_ENABLE;
2278         }
2279
2280         if (INTEL_INFO(dev_priv)->has_subslice_pg) {
2281                 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2282                 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
2283                         GEN8_RPCS_SS_CNT_SHIFT;
2284                 rpcs |= GEN8_RPCS_ENABLE;
2285         }
2286
2287         if (INTEL_INFO(dev_priv)->has_eu_pg) {
2288                 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2289                         GEN8_RPCS_EU_MIN_SHIFT;
2290                 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2291                         GEN8_RPCS_EU_MAX_SHIFT;
2292                 rpcs |= GEN8_RPCS_ENABLE;
2293         }
2294
2295         return rpcs;
2296 }
2297
2298 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2299 {
2300         u32 indirect_ctx_offset;
2301
2302         switch (INTEL_GEN(engine->i915)) {
2303         default:
2304                 MISSING_CASE(INTEL_GEN(engine->i915));
2305                 /* fall through */
2306         case 9:
2307                 indirect_ctx_offset =
2308                         GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2309                 break;
2310         case 8:
2311                 indirect_ctx_offset =
2312                         GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2313                 break;
2314         }
2315
2316         return indirect_ctx_offset;
2317 }
2318
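/*
 * Write the initial register state into the context image: the LRI headers
 * and (reg, value) pairs for the ring registers, the workaround batch
 * pointers (render engine only), the PDP/PML4 entries and, on the render
 * engine, the power clock state built by make_rpcs().
 */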
2319 static int
2320 populate_lr_context(struct i915_gem_context *ctx,
2321                     struct drm_i915_gem_object *ctx_obj,
2322                     struct intel_engine_cs *engine,
2323                     struct intel_ringbuffer *ringbuf)
2324 {
2325         struct drm_i915_private *dev_priv = ctx->i915;
2326         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2327         void *vaddr;
2328         u32 *reg_state;
2329         int ret;
2330
2331         if (!ppgtt)
2332                 ppgtt = dev_priv->mm.aliasing_ppgtt;
2333
2334         ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2335         if (ret) {
2336                 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2337                 return ret;
2338         }
2339
2340         vaddr = i915_gem_object_pin_map(ctx_obj);
2341         if (IS_ERR(vaddr)) {
2342                 ret = PTR_ERR(vaddr);
2343                 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2344                 return ret;
2345         }
2346         ctx_obj->dirty = true;
2347
2348         /* The second page of the context object contains some fields which must
2349          * be set up prior to the first execution. */
2350         reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
2351
2352         /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2353          * commands followed by (reg, value) pairs. The values we are setting here are
2354          * only for the first context restore: on a subsequent save, the GPU will
2355          * recreate this batchbuffer with new values (including all the missing
2356          * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
2357         reg_state[CTX_LRI_HEADER_0] =
2358                 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2359         ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2360                        RING_CONTEXT_CONTROL(engine),
2361                        _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2362                                           CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2363                                           (HAS_RESOURCE_STREAMER(dev_priv) ?
2364                                             CTX_CTRL_RS_CTX_ENABLE : 0)));
2365         ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2366                        0);
2367         ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2368                        0);
2369         /* Ring buffer start address is not known until the buffer is pinned.
2370          * It is written to the context image in execlists_update_context()
2371          */
2372         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2373                        RING_START(engine->mmio_base), 0);
2374         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2375                        RING_CTL(engine->mmio_base),
2376                        ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2377         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2378                        RING_BBADDR_UDW(engine->mmio_base), 0);
2379         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2380                        RING_BBADDR(engine->mmio_base), 0);
2381         ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2382                        RING_BBSTATE(engine->mmio_base),
2383                        RING_BB_PPGTT);
2384         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2385                        RING_SBBADDR_UDW(engine->mmio_base), 0);
2386         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2387                        RING_SBBADDR(engine->mmio_base), 0);
2388         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2389                        RING_SBBSTATE(engine->mmio_base), 0);
2390         if (engine->id == RCS) {
2391                 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2392                                RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2393                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2394                                RING_INDIRECT_CTX(engine->mmio_base), 0);
2395                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2396                                RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2397                 if (engine->wa_ctx.obj) {
2398                         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2399                         uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2400
2401                         reg_state[CTX_RCS_INDIRECT_CTX+1] =
2402                                 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2403                                 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2404
2405                         reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2406                                 intel_lr_indirect_ctx_offset(engine) << 6;
2407
2408                         reg_state[CTX_BB_PER_CTX_PTR+1] =
2409                                 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2410                                 0x01;
2411                 }
2412         }
2413         reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2414         ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2415                        RING_CTX_TIMESTAMP(engine->mmio_base), 0);
2416         /* PDP values will be assigned later if needed */
2417         ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2418                        0);
2419         ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2420                        0);
2421         ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2422                        0);
2423         ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2424                        0);
2425         ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2426                        0);
2427         ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2428                        0);
2429         ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2430                        0);
2431         ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2432                        0);
2433
2434         if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2435                 /* 64b PPGTT (48bit canonical)
2436                  * PDP0_DESCRIPTOR contains the base address to PML4 and
2437                  * other PDP Descriptors are ignored.
2438                  */
2439                 ASSIGN_CTX_PML4(ppgtt, reg_state);
2440         } else {
2441                 /* 32b PPGTT
2442                  * PDP*_DESCRIPTOR contains the base address of space supported.
2443                  * With dynamic page allocation, PDPs may not be allocated at
2444                  * this point. Point the unallocated PDPs to the scratch page
2445                  */
2446                 execlists_update_context_pdps(ppgtt, reg_state);
2447         }
2448
2449         if (engine->id == RCS) {
2450                 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2451                 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2452                                make_rpcs(dev_priv));
2453         }
2454
2455         i915_gem_object_unpin_map(ctx_obj);
2456
2457         return 0;
2458 }
2459
2460 /**
2461  * intel_lr_context_size() - return the size of the context for an engine
2462  * @engine: which engine to find the context size for
2463  *
2464  * Each engine may require a different amount of space for a context image,
2465  * so when allocating (or copying) an image, this function can be used to
2466  * find the right size for the specific engine.
2467  *
2468  * Return: size (in bytes) of an engine-specific context image
2469  *
2470  * Note: this size includes the HWSP, which is part of the context image
2471  * in LRC mode, but does not include the "shared data page" used with
2472  * GuC submission. The caller should account for this if using the GuC.
2473  */
2474 uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2475 {
2476         int ret = 0;
2477
2478         WARN_ON(INTEL_GEN(engine->i915) < 8);
2479
2480         switch (engine->id) {
2481         case RCS:
2482                 if (INTEL_GEN(engine->i915) >= 9)
2483                         ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2484                 else
2485                         ret = GEN8_LR_CONTEXT_RENDER_SIZE;
2486                 break;
2487         case VCS:
2488         case BCS:
2489         case VECS:
2490         case VCS2:
2491                 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2492                 break;
2493         }
2494
2495         return ret;
2496 }
2497
2498 /**
2499  * execlists_context_deferred_alloc() - create the LRC specific bits of a context
2500  * @ctx: LR context to create.
2501  * @engine: engine to be used with the context.
2502  *
2503  * This function can be called more than once, with different engines, if we plan
2504  * to use the context with them. The context backing objects and the ringbuffers
2505  * (especially the ringbuffer backing objects) take up a lot of memory, and that's why
2506  * the creation is a deferred call: it's better to make sure first that we need to use
2507  * a given ring with the context.
2508  *
2509  * Return: non-zero on error.
2510  */
2511 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2512                                             struct intel_engine_cs *engine)
2513 {
2514         struct drm_i915_gem_object *ctx_obj;
2515         struct intel_context *ce = &ctx->engine[engine->id];
2516         uint32_t context_size;
2517         struct intel_ringbuffer *ringbuf;
2518         int ret;
2519
2520         WARN_ON(ce->state);
2521
2522         context_size = round_up(intel_lr_context_size(engine), 4096);
2523
2524         /* One extra page for the data shared between the driver and GuC */
2525         context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2526
2527         ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
2528         if (IS_ERR(ctx_obj)) {
2529                 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2530                 return PTR_ERR(ctx_obj);
2531         }
2532
2533         ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
2534         if (IS_ERR(ringbuf)) {
2535                 ret = PTR_ERR(ringbuf);
2536                 goto error_deref_obj;
2537         }
2538
2539         ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
2540         if (ret) {
2541                 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2542                 goto error_ringbuf;
2543         }
2544
2545         ce->ringbuf = ringbuf;
2546         ce->state = ctx_obj;
2547         ce->initialised = engine->init_context == NULL;
2548
2549         return 0;
2550
2551 error_ringbuf:
2552         intel_ringbuffer_free(ringbuf);
2553 error_deref_obj:
2554         drm_gem_object_unreference(&ctx_obj->base);
2555         ce->ringbuf = NULL;
2556         ce->state = NULL;
2557         return ret;
2558 }
2559
2560 void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2561                             struct i915_gem_context *ctx)
2562 {
2563         struct intel_engine_cs *engine;
2564
2565         for_each_engine(engine, dev_priv) {
2566                 struct intel_context *ce = &ctx->engine[engine->id];
2567                 struct drm_i915_gem_object *ctx_obj = ce->state;
2568                 void *vaddr;
2569                 uint32_t *reg_state;
2570
2571                 if (!ctx_obj)
2572                         continue;
2573
2574                 vaddr = i915_gem_object_pin_map(ctx_obj);
2575                 if (WARN_ON(IS_ERR(vaddr)))
2576                         continue;
2577
2578                 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
2579                 ctx_obj->dirty = true;
2580
2581                 reg_state[CTX_RING_HEAD+1] = 0;
2582                 reg_state[CTX_RING_TAIL+1] = 0;
2583
2584                 i915_gem_object_unpin_map(ctx_obj);
2585
2586                 ce->ringbuf->head = 0;
2587                 ce->ringbuf->tail = 0;
2588         }
2589 }