/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need one set of those per engine command streamer? This
 * is where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

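/* Sizes of the per-engine logical ring context backing objects */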
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL             (1 << 0x2)
#define RING_EXECLIST1_VALID            (1 << 0x3)
#define RING_EXECLIST0_VALID            (1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS     (3 << 0xE)
#define RING_EXECLIST1_ACTIVE           (1 << 0x11)
#define RING_EXECLIST0_ACTIVE           (1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE     (1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED       (1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH  (1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE     (1 << 3)
#define GEN8_CTX_STATUS_COMPLETE        (1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE    (1 << 15)

#define CTX_LRI_HEADER_0                0x01
#define CTX_CONTEXT_CONTROL             0x02
#define CTX_RING_HEAD                   0x04
#define CTX_RING_TAIL                   0x06
#define CTX_RING_BUFFER_START           0x08
#define CTX_RING_BUFFER_CONTROL         0x0a
#define CTX_BB_HEAD_U                   0x0c
#define CTX_BB_HEAD_L                   0x0e
#define CTX_BB_STATE                    0x10
#define CTX_SECOND_BB_HEAD_U            0x12
#define CTX_SECOND_BB_HEAD_L            0x14
#define CTX_SECOND_BB_STATE             0x16
#define CTX_BB_PER_CTX_PTR              0x18
#define CTX_RCS_INDIRECT_CTX            0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET     0x1c
#define CTX_LRI_HEADER_1                0x21
#define CTX_CTX_TIMESTAMP               0x22
#define CTX_PDP3_UDW                    0x24
#define CTX_PDP3_LDW                    0x26
#define CTX_PDP2_UDW                    0x28
#define CTX_PDP2_LDW                    0x2a
#define CTX_PDP1_UDW                    0x2c
#define CTX_PDP1_LDW                    0x2e
#define CTX_PDP0_UDW                    0x30
#define CTX_PDP0_LDW                    0x32
#define CTX_LRI_HEADER_2                0x41
#define CTX_R_PWR_CLK_STATE             0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS      0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
        (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
        (reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do {                \
        const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
        reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
        reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
        reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
        reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

enum {
        ADVANCED_CONTEXT = 0,
        LEGACY_32B_CONTEXT,
        ADVANCED_AD_CONTEXT,
        LEGACY_64B_CONTEXT
};
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
                LEGACY_64B_CONTEXT :\
                LEGACY_32B_CONTEXT)
enum {
        FAULT_AND_HANG = 0,
        FAULT_AND_HALT, /* Debug only */
        FAULT_AND_STREAM,
        FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT        0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct intel_context *ctx,
                                            struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct intel_context *ctx,
                                struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private.
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
        /* On platforms with execlist available, vGPU will only
         * support execlist mode, no ring buffer mode.
         */
        if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
                return 1;

        if (INTEL_GEN(dev_priv) >= 9)
                return 1;

        if (enable_execlists == 0)
                return 0;

        if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
            USES_PPGTT(dev_priv) &&
            i915.use_mmio_flip >= 0)
                return 1;

        return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
                engine->idle_lite_restore_wa = ~0;

        engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
                                        IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
                                        (engine->id == VCS || engine->id == VCS2);

        engine->ctx_desc_template = GEN8_CTX_VALID;
        engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
                                   GEN8_CTX_ADDRESSING_MODE_SHIFT;
        if (IS_GEN8(dev_priv))
                engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
        engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

        /* TODO: WaDisableLiteRestore when we start using semaphore
         * signalling between Command Streamers */
        /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

        /* WaEnableForceRestoreInCtxtDescForVCS:skl */
        /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
        if (engine->disable_lite_restore_wa)
                engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *                                        for a pinned context
 *
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB:
 *    bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
 *    bits 12-31:    LRCA, GTT address of (the HWSP of) this context
 *    bits 32-52:    ctx ID, a globally unique tag
 *    bits 53-54:    mbz, reserved for use by hardware
 *    bits 55-63:    group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct intel_context *ctx,
                                   struct intel_engine_cs *engine)
{
        u64 desc;

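        /* ctx->hw_id must fit in the 21-bit ctx ID field (bits 32-52) */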
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

        desc = engine->ctx_desc_template;                       /* bits  0-11 */
        desc |= ctx->engine[engine->id].lrc_vma->node.start +   /* bits 12-31 */
               LRC_PPHWSP_PN * PAGE_SIZE;
        desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */

        ctx->engine[engine->id].lrc_desc = desc;
}

uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
                                     struct intel_engine_cs *engine)
{
        return ctx->engine[engine->id].lrc_desc;
}

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
                                 struct drm_i915_gem_request *rq1)
{
        struct intel_engine_cs *engine = rq0->engine;
        struct drm_i915_private *dev_priv = rq0->i915;
        uint64_t desc[2];

        if (rq1) {
                desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
                rq1->elsp_submitted++;
        } else {
                desc[1] = 0;
        }

        desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
        rq0->elsp_submitted++;

        /* You must always write both descriptors in the order below. */
        I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
        I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

        I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
        /* The context is automatically loaded after the following */
        I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

        /* ELSP is a wo register, use another nearby reg for posting */
        POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
        ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
        ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
        ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
        ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
        struct intel_engine_cs *engine = rq->engine;
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
        uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

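        /* reg_state holds (register offset, value) pairs, so +1 selects the
         * value slot of the CTX_RING_TAIL entry.
         */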
        reg_state[CTX_RING_TAIL+1] = rq->tail;

        /* True 32b PPGTT with dynamic page allocation: update PDP
         * registers and point the unallocated PDPs to scratch page.
         * PML4 is allocated during ppgtt init, so this is not needed
         * in 48-bit mode.
         */
        if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
                execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
                                      struct drm_i915_gem_request *rq1)
{
        struct drm_i915_private *dev_priv = rq0->i915;
        unsigned int fw_domains = rq0->engine->fw_domains;

        execlists_update_context(rq0);

        if (rq1)
                execlists_update_context(rq1);

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

        execlists_elsp_write(rq0, rq1);

        intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
        spin_unlock_irq(&dev_priv->uncore.lock);
}

static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
        struct drm_i915_gem_request *cursor, *tmp;

        assert_spin_locked(&engine->execlist_lock);

        /*
         * If irqs are not active generate a warning as batches that finish
         * without the irqs may get lost and a GPU Hang may occur.
         */
        WARN_ON(!intel_irqs_enabled(engine->i915));

        /* Try to read in pairs */
        list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
                                 execlist_link) {
                if (!req0) {
                        req0 = cursor;
                } else if (req0->ctx == cursor->ctx) {
                        /* Same ctx: ignore first request, as second request
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
                        list_del(&req0->execlist_link);
                        i915_gem_request_unreference(req0);
                        req0 = cursor;
                } else {
                        req1 = cursor;
                        WARN_ON(req1->elsp_submitted);
                        break;
                }
        }

        if (unlikely(!req0))
                return;

        if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
                /*
                 * WaIdleLiteRestore: make sure we never cause a lite restore
                 * with HEAD==TAIL.
                 *
                 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
                 * resubmit the request. See gen8_emit_request() for where we
                 * prepare the padding after the end of the request.
                 */
                struct intel_ringbuffer *ringbuf;

                ringbuf = req0->ctx->engine[engine->id].ringbuf;
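                /* Advance past the two MI_NOOP padding dwords (8 bytes)
                 * emitted after the request, so HEAD != TAIL on resubmit.
                 */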
                req0->tail += 8;
                req0->tail &= ringbuf->size - 1;
        }

        execlists_submit_requests(req0, req1);
}

static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
        struct drm_i915_gem_request *head_req;

        assert_spin_locked(&engine->execlist_lock);

        head_req = list_first_entry_or_null(&engine->execlist_queue,
                                            struct drm_i915_gem_request,
                                            execlist_link);

        if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
                return 0;

        WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

        if (--head_req->elsp_submitted > 0)
                return 0;

        list_del(&head_req->execlist_link);
        i915_gem_request_unreference(head_req);

        return 1;
}

static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
                   u32 *context_id)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u32 status;

        read_pointer %= GEN8_CSB_ENTRIES;

        status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

        if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                return 0;

        *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
                                                              read_pointer));

        return status;
}

/**
 * intel_lrc_irq_handler() - handle Context Switch interrupts
 * @data: tasklet data, the engine (struct intel_engine_cs *) to handle.
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct drm_i915_private *dev_priv = engine->i915;
        u32 status_pointer;
        unsigned int read_pointer, write_pointer;
        u32 csb[GEN8_CSB_ENTRIES][2];
        unsigned int csb_read = 0, i;
        unsigned int submit_contexts = 0;

        intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

        status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

        read_pointer = engine->next_context_status_buffer;
        write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
        if (read_pointer > write_pointer)
                write_pointer += GEN8_CSB_ENTRIES;

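        /*
         * Drain every CSB entry written since the last read. The write
         * pointer was unwrapped above so the loop walks forward;
         * get_context_status() re-applies the modulo when indexing the
         * buffer.
         */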
        while (read_pointer < write_pointer) {
                if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
                        break;
                csb[csb_read][0] = get_context_status(engine, ++read_pointer,
                                                      &csb[csb_read][1]);
                csb_read++;
        }

        engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

        /* Update the read pointer to the old write pointer. Manual ringbuffer
         * management ftw </sarcasm> */
        I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
                      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
                                    engine->next_context_status_buffer << 8));

        intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

        spin_lock(&engine->execlist_lock);

        for (i = 0; i < csb_read; i++) {
                if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
                        if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
                                if (execlists_check_remove_request(engine, csb[i][1]))
                                        WARN(1, "Lite Restored request removed from queue\n");
                        } else
                                WARN(1, "Preemption without Lite Restore\n");
                }

                if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
                    GEN8_CTX_STATUS_ELEMENT_SWITCH))
                        submit_contexts +=
                                execlists_check_remove_request(engine, csb[i][1]);
        }

        if (submit_contexts) {
                if (!engine->disable_lite_restore_wa ||
                    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
                        execlists_context_unqueue(engine);
        }

        spin_unlock(&engine->execlist_lock);

        if (unlikely(submit_contexts > 2))
                DRM_ERROR("More than two context complete events?\n");
}

static void execlists_context_queue(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;

        spin_lock_bh(&engine->execlist_lock);

        list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
                if (++num_elements > 2)
                        break;

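        /*
         * The ELSP only has two ports, so if more than two elements are
         * already queued and the new request uses the same context as the
         * (not yet submitted) tail of the queue, drop that tail: the new
         * request's tail supersedes it.
         */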
        if (num_elements > 2) {
                struct drm_i915_gem_request *tail_req;

                tail_req = list_last_entry(&engine->execlist_queue,
                                           struct drm_i915_gem_request,
                                           execlist_link);

                if (request->ctx == tail_req->ctx) {
                        WARN(tail_req->elsp_submitted != 0,
                                "More than 2 already-submitted reqs queued\n");
                        list_del(&tail_req->execlist_link);
                        i915_gem_request_unreference(tail_req);
                }
        }

        i915_gem_request_reference(request);
        list_add_tail(&request->execlist_link, &engine->execlist_queue);
        request->ctx_hw_id = request->ctx->hw_id;
        if (num_elements == 0)
                execlists_context_unqueue(engine);

        spin_unlock_bh(&engine->execlist_lock);
}

static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        uint32_t flush_domains;
        int ret;

        flush_domains = 0;
        if (engine->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;

        ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;

        engine->gpu_caches_dirty = false;
        return 0;
}

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
{
        const unsigned other_rings = ~intel_engine_flag(req->engine);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
        int ret;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;

                if (obj->active & other_rings) {
                        ret = i915_gem_object_sync(obj, req->engine, &req);
                        if (ret)
                                return ret;
                }

                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        flush_chipset |= i915_gem_clflush_object(obj, false);

                flush_domains |= obj->base.write_domain;
        }

        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
        return logical_ring_invalidate_all_caches(req);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        int ret;

        /* Flush enough space to reduce the likelihood of waiting after
         * we start building the request - in which case we will just
         * have to repeat work.
         */
        request->reserved_space += EXECLISTS_REQUEST_SIZE;

        if (request->ctx->engine[engine->id].state == NULL) {
                ret = execlists_context_deferred_alloc(request->ctx, engine);
                if (ret)
                        return ret;
        }

        request->ringbuf = request->ctx->engine[engine->id].ringbuf;

        if (i915.enable_guc_submission) {
                /*
                 * Check that the GuC has space for the request before
                 * going any further, as the i915_add_request() call
                 * later on mustn't fail ...
                 */
                struct intel_guc *guc = &request->i915->guc;

                ret = i915_guc_wq_check_space(guc->execbuf_client);
                if (ret)
                        return ret;
        }

        ret = intel_lr_context_pin(request->ctx, engine);
        if (ret)
                return ret;

        ret = intel_ring_begin(request, 0);
        if (ret)
                goto err_unpin;

        if (!request->ctx->engine[engine->id].initialised) {
                ret = engine->init_context(request);
                if (ret)
                        goto err_unpin;

                request->ctx->engine[engine->id].initialised = true;
        }

        /* Note that after this point, we have committed to using
         * this request as it is being used to both track the
         * state of engine initialisation and liveness of the
         * golden renderstate above. Think twice before you try
         * to cancel/unwind this request now.
         */

        request->reserved_space -= EXECLISTS_REQUEST_SIZE;
        return 0;

err_unpin:
        intel_lr_context_unpin(request->ctx, engine);
        return ret;
}

/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct drm_i915_private *dev_priv = request->i915;
        struct intel_engine_cs *engine = request->engine;

        intel_logical_ring_advance(ringbuf);
        request->tail = ringbuf->tail;

        /*
         * Here we add two extra NOOPs as padding to avoid
         * lite restore of a context with HEAD==TAIL.
         *
         * Caller must reserve WA_TAIL_DWORDS for us!
         */
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance(ringbuf);

        if (intel_engine_stopped(engine))
                return 0;

        /* We keep the previous context alive until we retire the following
         * request. This ensures that the context object is still pinned
         * for any residual writes the HW makes into it on the context switch
         * into the next object following the breadcrumb. Otherwise, we may
         * retire the context too early.
         */
        request->previous_context = engine->last_context;
        engine->last_context = request->ctx;

        if (dev_priv->guc.execbuf_client)
                i915_guc_submit(dev_priv->guc.execbuf_client, request);
        else
                execlists_context_queue(request);

        return 0;
}

/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @params: execbuffer call parameters (engine, context, request, batch and
 *          dispatch flags).
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
 * away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */
int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas)
{
        struct drm_device       *dev = params->dev;
        struct intel_engine_cs *engine = params->engine;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
        u64 exec_start;
        int instp_mode;
        u32 instp_mask;
        int ret;

        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        instp_mask = I915_EXEC_CONSTANTS_MASK;
        switch (instp_mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
                        return -EINVAL;
                }

                if (instp_mode != dev_priv->relative_constants_mode) {
                        if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
                                return -EINVAL;
                        }

                        /* The HW changed the meaning of this bit on gen6 */
                        instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
                return -EINVAL;
        }

        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                DRM_DEBUG("sol reset is gen7 only\n");
                return -EINVAL;
        }

        ret = execlists_move_to_gpu(params->request, vmas);
        if (ret)
                return ret;

        if (engine == &dev_priv->engine[RCS] &&
            instp_mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(params->request, 4);
                if (ret)
                        return ret;

                intel_logical_ring_emit(ringbuf, MI_NOOP);
                intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
                intel_logical_ring_emit_reg(ringbuf, INSTPM);
                intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
                intel_logical_ring_advance(ringbuf);

                dev_priv->relative_constants_mode = instp_mode;
        }

        exec_start = params->batch_obj_vm_offset +
                     args->batch_start_offset;

        ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
        if (ret)
                return ret;

        trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

        i915_gem_execbuffer_move_to_active(vmas, params->request);

        return 0;
}

void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *req, *tmp;
        LIST_HEAD(cancel_list);

        WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));

        spin_lock_bh(&engine->execlist_lock);
        list_replace_init(&engine->execlist_queue, &cancel_list);
        spin_unlock_bh(&engine->execlist_lock);

        list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
}

void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        if (!intel_engine_initialized(engine))
                return;

        ret = intel_engine_idle(engine);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          engine->name, ret);

        /* TODO: Is this correct with Execlists enabled? */
        I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
        if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
                DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
                return;
        }
        I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        int ret;

        if (!engine->gpu_caches_dirty)
                return 0;

        ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;

        engine->gpu_caches_dirty = false;
        return 0;
}

static int intel_lr_context_pin(struct intel_context *ctx,
                                struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = ctx->i915;
        struct drm_i915_gem_object *ctx_obj;
        struct intel_ringbuffer *ringbuf;
        void *vaddr;
        u32 *lrc_reg_state;
        int ret;

        lockdep_assert_held(&ctx->i915->dev->struct_mutex);

        if (ctx->engine[engine->id].pin_count++)
                return 0;

        ctx_obj = ctx->engine[engine->id].state;
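        /* Bias the mapping above GUC_WOPCM_TOP: GGTT offsets below the WOPCM
         * top are not usable by the GuC, so keep the context image clear of
         * that range in case GuC submission is in use.
         */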
        ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
                        PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
        if (ret)
                goto err;

        vaddr = i915_gem_object_pin_map(ctx_obj);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto unpin_ctx_obj;
        }

        lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

        ringbuf = ctx->engine[engine->id].ringbuf;
        ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
        if (ret)
                goto unpin_map;

        i915_gem_context_reference(ctx);
        ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
        intel_lr_context_descriptor_update(ctx, engine);
        lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
        ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
        ctx_obj->dirty = true;

        /* Invalidate GuC TLB. */
        if (i915.enable_guc_submission)
                I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

        return 0;

unpin_map:
        i915_gem_object_unpin_map(ctx_obj);
unpin_ctx_obj:
        i915_gem_object_ggtt_unpin(ctx_obj);
err:
        ctx->engine[engine->id].pin_count = 0;
        return ret;
}

void intel_lr_context_unpin(struct intel_context *ctx,
                            struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *ctx_obj;

        lockdep_assert_held(&ctx->i915->dev->struct_mutex);
        GEM_BUG_ON(ctx->engine[engine->id].pin_count == 0);

        if (--ctx->engine[engine->id].pin_count)
                return;

        intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);

        ctx_obj = ctx->engine[engine->id].state;
        i915_gem_object_unpin_map(ctx_obj);
        i915_gem_object_ggtt_unpin(ctx_obj);

        ctx->engine[engine->id].lrc_vma = NULL;
        ctx->engine[engine->id].lrc_desc = 0;
        ctx->engine[engine->id].lrc_reg_state = NULL;

        i915_gem_context_unreference(ctx);
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
        int ret, i;
        struct intel_engine_cs *engine = req->engine;
        struct intel_ringbuffer *ringbuf = req->ringbuf;
        struct i915_workarounds *w = &req->i915->workarounds;

        if (w->count == 0)
                return 0;

        engine->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;

        ret = intel_ring_begin(req, w->count * 2 + 2);
        if (ret)
                return ret;

        intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
        for (i = 0; i < w->count; i++) {
                intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
                intel_logical_ring_emit(ringbuf, w->reg[i].value);
        }
        intel_logical_ring_emit(ringbuf, MI_NOOP);

        intel_logical_ring_advance(ringbuf);

        engine->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;

        return 0;
}

#define wa_ctx_emit(batch, index, cmd)                                  \
        do {                                                            \
                int __index = (index)++;                                \
                if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
                        return -ENOSPC;                                 \
                }                                                       \
                batch[__index] = (cmd);                                 \
        } while (0)

#define wa_ctx_emit_reg(batch, index, reg) \
        wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take the register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *const batch,
                                                uint32_t index)
{
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

        /*
         * WaDisableLSQCROPERFforOCL:skl
         * This WA is implemented in skl_init_clock_gating() but since
         * this batch updates GEN8_L3SQCREG4 with default value we need to
         * set this bit here to retain the WA during flush.
         */
        if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
        wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
        wa_ctx_emit(batch, index, 0);

        wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
        wa_ctx_emit(batch, index, l3sqc4_flush);

        wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
        wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
                                   PIPE_CONTROL_DC_FLUSH_ENABLE));
        wa_ctx_emit(batch, index, 0);
        wa_ctx_emit(batch, index, 0);
        wa_ctx_emit(batch, index, 0);
        wa_ctx_emit(batch, index, 0);

        wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
        wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
        wa_ctx_emit(batch, index, 0);

        return index;
}

static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
                                    uint32_t offset,
                                    uint32_t start_alignment)
{
        return wa_ctx->offset = ALIGN(offset, start_alignment);
}

static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
                             uint32_t offset,
                             uint32_t size_alignment)
{
        wa_ctx->size = offset - wa_ctx->offset;

        WARN(wa_ctx->size % size_alignment,
             "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
             wa_ctx->size, size_alignment);
        return 0;
}

/**
 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
 *
 * @engine: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 *  offset: specifies start of the batch, should be cache-aligned. This is updated
 *    with the offset value received as input.
 *  size: size of the batch in DWORDS but HW expects in terms of cachelines
 * @batch: page in which WA are loaded
 * @offset: This field specifies the start of the batch, it should be
 *  cache-aligned otherwise it is adjusted accordingly.
 *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
 *  initialized at the beginning and shared across all contexts but this field
 *  helps us to have multiple batches at different offsets and select them based
 *  on a criteria. At the moment this batch always starts at the beginning of the
 *  page and at this point we don't have multiple wa_ctx batch buffers.
 *
 *  The number of WA applied is not known at the beginning; we use this field
 *  to return the number of DWORDS written.
 *
 *  It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 *  so it adds NOOPs as padding to make it cacheline aligned.
 *  MI_BATCH_BUFFER_END will be added to the perctx batch and both of them
 *  together make a complete batch buffer.
 *
 * Return: non-zero if we exceed the PAGE_SIZE limit.
 */

static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
                                    uint32_t *const batch,
                                    uint32_t *offset)
{
        uint32_t scratch_addr;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

        /* WaDisableCtxRestoreArbitration:bdw,chv */
        wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

        /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
        if (IS_BROADWELL(engine->i915)) {
                int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
                if (rc < 0)
                        return rc;
                index = rc;
        }

        /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
        /* Actual scratch location is at 128 bytes offset */
        scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;

        wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
        wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
                                   PIPE_CONTROL_GLOBAL_GTT_IVB |
                                   PIPE_CONTROL_CS_STALL |
                                   PIPE_CONTROL_QW_WRITE));
        wa_ctx_emit(batch, index, scratch_addr);
        wa_ctx_emit(batch, index, 0);
        wa_ctx_emit(batch, index, 0);
        wa_ctx_emit(batch, index, 0);

        /* Pad to end of cacheline */
        while (index % CACHELINE_DWORDS)
                wa_ctx_emit(batch, index, MI_NOOP);

        /*
         * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
         * execution depends on the length specified in terms of cache lines
         * in the register CTX_RCS_INDIRECT_CTX
         */

        return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

/**
 * gen8_init_perctx_bb() - initialize per ctx batch with WA
 *
 * @engine: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 *  offset: specifies start of the batch, should be cache-aligned.
 *  size: size of the batch in DWORDS but HW expects in terms of cachelines
 * @batch: page in which WA are loaded
 * @offset: This field specifies the start of this batch.
 *   This batch is started immediately after the indirect_ctx batch. Since we
 *   ensure that indirect_ctx ends on a cacheline this batch is aligned
 *   automatically.
 *
 *   The number of DWORDS written is returned using this field.
 *
 *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
 *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
 */
static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
                               uint32_t *const batch,
                               uint32_t *offset)
{
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

        /* WaDisableCtxRestoreArbitration:bdw,chv */
        wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

        wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

        return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
                                    uint32_t *const batch,
                                    uint32_t *offset)
{
        int ret;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

        /* WaDisableCtxRestoreArbitration:skl,bxt */
        if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
            IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
        ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
        if (ret < 0)
                return ret;
        index = ret;

        /* Pad to end of cacheline */
        while (index % CACHELINE_DWORDS)
                wa_ctx_emit(batch, index, MI_NOOP);

        return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
                               uint32_t *const batch,
                               uint32_t *offset)
{
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
        if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
            IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
                wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
                wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
                wa_ctx_emit(batch, index,
                            _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
                wa_ctx_emit(batch, index, MI_NOOP);
        }

        /* WaClearTdlStateAckDirtyBits:bxt */
        if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
                wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));

                wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
                wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

                wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
                wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

                wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
                wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

                wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
                /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
                wa_ctx_emit(batch, index, 0x0);
                wa_ctx_emit(batch, index, MI_NOOP);
        }

        /* WaDisableCtxRestoreArbitration:skl,bxt */
        if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
            IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

        wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

        return wa_ctx_end(wa_ctx, *offset = index, 1);
}
1319
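     /*
      * Allocate the backing object for the per-engine workaround batch
      * buffers and pin it into the GGTT.
      */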
1320 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1321 {
1322         int ret;
1323
1324         engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
1325                                                    PAGE_ALIGN(size));
1326         if (IS_ERR(engine->wa_ctx.obj)) {
1327                 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1328                 ret = PTR_ERR(engine->wa_ctx.obj);
1329                 engine->wa_ctx.obj = NULL;
1330                 return ret;
1331         }
1332
1333         ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
1334         if (ret) {
1335                 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1336                                  ret);
1337                 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1338                 return ret;
1339         }
1340
1341         return 0;
1342 }
1343
1344 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1345 {
1346         if (engine->wa_ctx.obj) {
1347                 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1348                 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1349                 engine->wa_ctx.obj = NULL;
1350         }
1351 }
1352
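     /*
      * Set up the workaround batch buffers for the render engine: allocate
      * the WA context page, map it and populate the gen8/gen9 indirect-ctx
      * and per-ctx batches. The backing object is torn down again if
      * population fails.
      */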
1353 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1354 {
1355         int ret;
1356         uint32_t *batch;
1357         uint32_t offset;
1358         struct page *page;
1359         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1360
1361         WARN_ON(engine->id != RCS);
1362
1363         /* Update this when WAs for higher gens are added */
1364         if (INTEL_GEN(engine->i915) > 9) {
1365                 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1366                           INTEL_GEN(engine->i915));
1367                 return 0;
1368         }
1369
1370         /* Some WAs perform writes to the scratch page; ensure it is valid */
1371         if (engine->scratch.obj == NULL) {
1372                 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1373                 return -EINVAL;
1374         }
1375
1376         ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1377         if (ret) {
1378                 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1379                 return ret;
1380         }
1381
1382         page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
1383         batch = kmap_atomic(page);
1384         offset = 0;
1385
1386         if (IS_GEN8(engine->i915)) {
1387                 ret = gen8_init_indirectctx_bb(engine,
1388                                                &wa_ctx->indirect_ctx,
1389                                                batch,
1390                                                &offset);
1391                 if (ret)
1392                         goto out;
1393
1394                 ret = gen8_init_perctx_bb(engine,
1395                                           &wa_ctx->per_ctx,
1396                                           batch,
1397                                           &offset);
1398                 if (ret)
1399                         goto out;
1400         } else if (IS_GEN9(engine->i915)) {
1401                 ret = gen9_init_indirectctx_bb(engine,
1402                                                &wa_ctx->indirect_ctx,
1403                                                batch,
1404                                                &offset);
1405                 if (ret)
1406                         goto out;
1407
1408                 ret = gen9_init_perctx_bb(engine,
1409                                           &wa_ctx->per_ctx,
1410                                           batch,
1411                                           &offset);
1412                 if (ret)
1413                         goto out;
1414         }
1415
1416 out:
1417         kunmap_atomic(batch);
1418         if (ret)
1419                 lrc_destroy_wa_ctx_obj(engine);
1420
1421         return ret;
1422 }
1423
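     /*
      * Point the engine at its hardware status page by programming
      * RING_HWS_PGA with the HWSP GGTT address.
      */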
1424 static void lrc_init_hws(struct intel_engine_cs *engine)
1425 {
1426         struct drm_i915_private *dev_priv = engine->i915;
1427
1428         I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1429                    (u32)engine->status_page.gfx_addr);
1430         POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1431 }
1432
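     /*
      * Common engine initialisation for execlists: reprogram the HWSP and
      * interrupt masks, enable execlist submission via RING_MODE and
      * recover the CSB write pointer (see below) before re-arming
      * hangcheck and programming the MOCS tables.
      */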
1433 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1434 {
1435         struct drm_i915_private *dev_priv = engine->i915;
1436         unsigned int next_context_status_buffer_hw;
1437
1438         lrc_init_hws(engine);
1439
1440         I915_WRITE_IMR(engine,
1441                        ~(engine->irq_enable_mask | engine->irq_keep_mask));
1442         I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1443
1444         I915_WRITE(RING_MODE_GEN7(engine),
1445                    _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1446                    _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1447         POSTING_READ(RING_MODE_GEN7(engine));
1448
1449         /*
1450          * Instead of resetting the Context Status Buffer (CSB) read pointer to
1451          * zero, we need to read the write pointer from hardware and use its
1452          * value because "this register is power context save restored".
1453          * Effectively, these states have been observed:
1454          *
1455          *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1456          * BDW  | CSB regs not reset       | CSB regs reset       |
1457          * CHT  | CSB regs not reset       | CSB regs not reset   |
1458          * SKL  |         ?                |         ?            |
1459          * BXT  |         ?                |         ?            |
1460          */
1461         next_context_status_buffer_hw =
1462                 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
1463
1464         /*
1465          * When the CSB registers are reset (also after power-up / gpu reset),
1466          * CSB write pointer is set to all 1's, which is not valid; use '5' in
1467          * this special case so that the first element read is CSB[0].
1468          */
1469         if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1470                 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1471
1472         engine->next_context_status_buffer = next_context_status_buffer_hw;
1473         DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1474
1475         intel_engine_init_hangcheck(engine);
1476
1477         return intel_mocs_init_engine(engine);
1478 }
1479
1480 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1481 {
1482         struct drm_i915_private *dev_priv = engine->i915;
1483         int ret;
1484
1485         ret = gen8_init_common_ring(engine);
1486         if (ret)
1487                 return ret;
1488
1489         /* We need to disable the AsyncFlip performance optimisations in order
1490          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1491          * programmed to '1' on all products.
1492          *
1493          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1494          */
1495         I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1496
1497         I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1498
1499         return init_workarounds_ring(engine);
1500 }
1501
1502 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1503 {
1504         int ret;
1505
1506         ret = gen8_init_common_ring(engine);
1507         if (ret)
1508                 return ret;
1509
1510         return init_workarounds_ring(engine);
1511 }
1512
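     /*
      * Emit a single MI_LOAD_REGISTER_IMM that reloads all four PDP
      * register pairs with the page directory addresses of the request's
      * PPGTT.
      */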
1513 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1514 {
1515         struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1516         struct intel_engine_cs *engine = req->engine;
1517         struct intel_ringbuffer *ringbuf = req->ringbuf;
1518         const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1519         int i, ret;
1520
1521         ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1522         if (ret)
1523                 return ret;
1524
1525         intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1526         for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1527                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1528
1529                 intel_logical_ring_emit_reg(ringbuf,
1530                                             GEN8_RING_PDP_UDW(engine, i));
1531                 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
1532                 intel_logical_ring_emit_reg(ringbuf,
1533                                             GEN8_RING_PDP_LDW(engine, i));
1534                 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1535         }
1536
1537         intel_logical_ring_emit(ringbuf, MI_NOOP);
1538         intel_logical_ring_advance(ringbuf);
1539
1540         return 0;
1541 }
1542
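     /*
      * Start a batch buffer on the given request. If the PPGTT page
      * directories are dirty for this engine (and we are not using full
      * 48-bit PPGTT or running under a vGPU), the PDPs are reloaded from
      * the ring before MI_BATCH_BUFFER_START_GEN8 is emitted.
      */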
1543 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1544                               u64 offset, unsigned dispatch_flags)
1545 {
1546         struct intel_ringbuffer *ringbuf = req->ringbuf;
1547         bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1548         int ret;
1549
1550         /* Don't rely on hw updating PDPs, especially in lite-restore.
1551          * Ideally, we should set Force PD Restore in ctx descriptor,
1552          * but we can't. Force Restore would be a second option, but
1553          * it is unsafe in case of lite-restore (because the ctx is
1554          * not idle). PML4 is allocated during ppgtt init so this is
1555          * not needed in 48-bit. */
1556         if (req->ctx->ppgtt &&
1557             (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1558                 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1559                     !intel_vgpu_active(req->i915)) {
1560                         ret = intel_logical_ring_emit_pdps(req);
1561                         if (ret)
1562                                 return ret;
1563                 }
1564
1565                 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1566         }
1567
1568         ret = intel_ring_begin(req, 4);
1569         if (ret)
1570                 return ret;
1571
1572         /* FIXME(BDW): Address space and security selectors. */
1573         intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1574                                 (ppgtt<<8) |
1575                                 (dispatch_flags & I915_DISPATCH_RS ?
1576                                  MI_BATCH_RESOURCE_STREAMER : 0));
1577         intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1578         intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1579         intel_logical_ring_emit(ringbuf, MI_NOOP);
1580         intel_logical_ring_advance(ringbuf);
1581
1582         return 0;
1583 }
1584
1585 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1586 {
1587         struct drm_i915_private *dev_priv = engine->i915;
1588         unsigned long flags;
1589
1590         if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1591                 return false;
1592
1593         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1594         if (engine->irq_refcount++ == 0) {
1595                 I915_WRITE_IMR(engine,
1596                                ~(engine->irq_enable_mask | engine->irq_keep_mask));
1597                 POSTING_READ(RING_IMR(engine->mmio_base));
1598         }
1599         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1600
1601         return true;
1602 }
1603
1604 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1605 {
1606         struct drm_i915_private *dev_priv = engine->i915;
1607         unsigned long flags;
1608
1609         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1610         if (--engine->irq_refcount == 0) {
1611                 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1612                 POSTING_READ(RING_IMR(engine->mmio_base));
1613         }
1614         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1615 }
1616
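     /*
      * Flush for the non-render engines: MI_FLUSH_DW with a post-sync
      * write to the scratch HWS slot, adding TLB (and, on the BSD ring,
      * BSD) invalidation when GPU domains are invalidated.
      */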
1617 static int gen8_emit_flush(struct drm_i915_gem_request *request,
1618                            u32 invalidate_domains,
1619                            u32 unused)
1620 {
1621         struct intel_ringbuffer *ringbuf = request->ringbuf;
1622         struct intel_engine_cs *engine = ringbuf->engine;
1623         struct drm_i915_private *dev_priv = request->i915;
1624         uint32_t cmd;
1625         int ret;
1626
1627         ret = intel_ring_begin(request, 4);
1628         if (ret)
1629                 return ret;
1630
1631         cmd = MI_FLUSH_DW + 1;
1632
1633         /* We always require a command barrier so that subsequent
1634          * commands, such as breadcrumb interrupts, are strictly ordered
1635          * wrt the contents of the write cache being flushed to memory
1636          * (and thus being coherent from the CPU).
1637          */
1638         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1639
1640         if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1641                 cmd |= MI_INVALIDATE_TLB;
1642                 if (engine == &dev_priv->engine[VCS])
1643                         cmd |= MI_INVALIDATE_BSD;
1644         }
1645
1646         intel_logical_ring_emit(ringbuf, cmd);
1647         intel_logical_ring_emit(ringbuf,
1648                                 I915_GEM_HWS_SCRATCH_ADDR |
1649                                 MI_FLUSH_DW_USE_GTT);
1650         intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1651         intel_logical_ring_emit(ringbuf, 0); /* value */
1652         intel_logical_ring_advance(ringbuf);
1653
1654         return 0;
1655 }
1656
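     /*
      * Render engine flush/invalidate, implemented with a PIPE_CONTROL;
      * on gen9 a null PIPE_CONTROL is emitted first whenever the VF cache
      * is invalidated (vf_flush_wa).
      */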
1657 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1658                                   u32 invalidate_domains,
1659                                   u32 flush_domains)
1660 {
1661         struct intel_ringbuffer *ringbuf = request->ringbuf;
1662         struct intel_engine_cs *engine = ringbuf->engine;
1663         u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1664         bool vf_flush_wa = false;
1665         u32 flags = 0;
1666         int ret;
1667
1668         flags |= PIPE_CONTROL_CS_STALL;
1669
1670         if (flush_domains) {
1671                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1672                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1673                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1674                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1675         }
1676
1677         if (invalidate_domains) {
1678                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1679                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1680                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1681                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1682                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1683                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1684                 flags |= PIPE_CONTROL_QW_WRITE;
1685                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1686
1687                 /*
1688                  * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1689                  * pipe control.
1690                  */
1691                 if (IS_GEN9(request->i915))
1692                         vf_flush_wa = true;
1693         }
1694
1695         ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
1696         if (ret)
1697                 return ret;
1698
1699         if (vf_flush_wa) {
1700                 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1701                 intel_logical_ring_emit(ringbuf, 0);
1702                 intel_logical_ring_emit(ringbuf, 0);
1703                 intel_logical_ring_emit(ringbuf, 0);
1704                 intel_logical_ring_emit(ringbuf, 0);
1705                 intel_logical_ring_emit(ringbuf, 0);
1706         }
1707
1708         intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1709         intel_logical_ring_emit(ringbuf, flags);
1710         intel_logical_ring_emit(ringbuf, scratch_addr);
1711         intel_logical_ring_emit(ringbuf, 0);
1712         intel_logical_ring_emit(ringbuf, 0);
1713         intel_logical_ring_emit(ringbuf, 0);
1714         intel_logical_ring_advance(ringbuf);
1715
1716         return 0;
1717 }
1718
1719 static u32 gen8_get_seqno(struct intel_engine_cs *engine)
1720 {
1721         return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1722 }
1723
1724 static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1725 {
1726         intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1727 }
1728
1729 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1730 {
1731         /*
1732          * On BXT A steppings there is a HW coherency issue whereby the
1733          * MI_STORE_DATA_IMM storing the completed request's seqno
1734          * occasionally doesn't invalidate the CPU cache. Work around this by
1735          * clflushing the corresponding cacheline whenever the caller wants
1736          * the coherency to be guaranteed. Note that this cacheline is known
1737          * to be clean at this point, since we only write it in
1738          * bxt_a_set_seqno(), where we also do a clflush after the write. So
1739          * this clflush in practice becomes an invalidate operation.
1740          */
1741         intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1742 }
1743
1744 static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1745 {
1746         intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1747
1748         /* See bxt_a_seqno_barrier() explaining the reason for the clflush. */
1749         intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1750 }
1751
1752 /*
1753  * Reserve space for 2 NOOPs at the end of each request to be
1754  * used as a workaround for not being allowed to do lite
1755  * restore with HEAD==TAIL (WaIdleLiteRestore).
1756  */
1757 #define WA_TAIL_DWORDS 2
1758
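     /*
      * Emit the request breadcrumb: an MI_FLUSH_DW that writes the seqno
      * into the HWSP, followed by MI_USER_INTERRUPT, then advance and
      * submit the request.
      */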
1759 static int gen8_emit_request(struct drm_i915_gem_request *request)
1760 {
1761         struct intel_ringbuffer *ringbuf = request->ringbuf;
1762         int ret;
1763
1764         ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
1765         if (ret)
1766                 return ret;
1767
1768         /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1769         BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1770
1771         intel_logical_ring_emit(ringbuf,
1772                                 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1773         intel_logical_ring_emit(ringbuf,
1774                                 intel_hws_seqno_address(request->engine) |
1775                                 MI_FLUSH_DW_USE_GTT);
1776         intel_logical_ring_emit(ringbuf, 0);
1777         intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1778         intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1779         intel_logical_ring_emit(ringbuf, MI_NOOP);
1780         return intel_logical_ring_advance_and_submit(request);
1781 }
1782
1783 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1784 {
1785         struct intel_ringbuffer *ringbuf = request->ringbuf;
1786         int ret;
1787
1788         ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
1789         if (ret)
1790                 return ret;
1791
1792         /* We're using qword write, seqno should be aligned to 8 bytes. */
1793         BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1794
1795         /* w/a: post sync ops following a GPGPU operation need a
1796          * prior CS_STALL, which is emitted by the flush
1797          * following the batch.
1798          */
1799         intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1800         intel_logical_ring_emit(ringbuf,
1801                                 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1802                                  PIPE_CONTROL_CS_STALL |
1803                                  PIPE_CONTROL_QW_WRITE));
1804         intel_logical_ring_emit(ringbuf,
1805                                 intel_hws_seqno_address(request->engine));
1806         intel_logical_ring_emit(ringbuf, 0);
1807         intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1808         /* We're thrashing one dword of HWS. */
1809         intel_logical_ring_emit(ringbuf, 0);
1810         intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1811         intel_logical_ring_emit(ringbuf, MI_NOOP);
1812         return intel_logical_ring_advance_and_submit(request);
1813 }
1814
1815 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
1816 {
1817         struct render_state so;
1818         int ret;
1819
1820         ret = i915_gem_render_state_prepare(req->engine, &so);
1821         if (ret)
1822                 return ret;
1823
1824         if (so.rodata == NULL)
1825                 return 0;
1826
1827         ret = req->engine->emit_bb_start(req, so.ggtt_offset,
1828                                        I915_DISPATCH_SECURE);
1829         if (ret)
1830                 goto out;
1831
1832         ret = req->engine->emit_bb_start(req,
1833                                        (so.ggtt_offset + so.aux_batch_offset),
1834                                        I915_DISPATCH_SECURE);
1835         if (ret)
1836                 goto out;
1837
1838         i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
1839
1840 out:
1841         i915_gem_render_state_fini(&so);
1842         return ret;
1843 }
1844
1845 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1846 {
1847         int ret;
1848
1849         ret = intel_logical_ring_workarounds_emit(req);
1850         if (ret)
1851                 return ret;
1852
1853         ret = intel_rcs_context_init_mocs(req);
1854         /*
1855          * Failing to program the MOCS is non-fatal. The system will not
1856          * run at peak performance. So generate an error and carry on.
1857          */
1858         if (ret)
1859                 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1860
1861         return intel_lr_context_render_state_init(req);
1862 }
1863
1864 /**
1865  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1866  *
1867  * @engine: Engine Command Streamer.
1868  *
1869  */
1870 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1871 {
1872         struct drm_i915_private *dev_priv;
1873
1874         if (!intel_engine_initialized(engine))
1875                 return;
1876
1877         /*
1878          * Tasklet cannot be active at this point due to intel_mark_active/idle,
1879          * so this is just for documentation.
1880          */
1881         if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1882                 tasklet_kill(&engine->irq_tasklet);
1883
1884         dev_priv = engine->i915;
1885
1886         if (engine->buffer) {
1887                 intel_logical_ring_stop(engine);
1888                 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1889         }
1890
1891         if (engine->cleanup)
1892                 engine->cleanup(engine);
1893
1894         i915_cmd_parser_fini_ring(engine);
1895         i915_gem_batch_pool_fini(&engine->batch_pool);
1896
1897         if (engine->status_page.obj) {
1898                 i915_gem_object_unpin_map(engine->status_page.obj);
1899                 engine->status_page.obj = NULL;
1900         }
1901         intel_lr_context_unpin(dev_priv->kernel_context, engine);
1902
1903         engine->idle_lite_restore_wa = 0;
1904         engine->disable_lite_restore_wa = false;
1905         engine->ctx_desc_template = 0;
1906
1907         lrc_destroy_wa_ctx_obj(engine);
1908         engine->i915 = NULL;
1909 }
1910
1911 static void
1912 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1913 {
1914         /* Default vfuncs which can be overridden by each engine. */
1915         engine->init_hw = gen8_init_common_ring;
1916         engine->emit_request = gen8_emit_request;
1917         engine->emit_flush = gen8_emit_flush;
1918         engine->irq_get = gen8_logical_ring_get_irq;
1919         engine->irq_put = gen8_logical_ring_put_irq;
1920         engine->emit_bb_start = gen8_emit_bb_start;
1921         engine->get_seqno = gen8_get_seqno;
1922         engine->set_seqno = gen8_set_seqno;
1923         if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1924                 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
1925                 engine->set_seqno = bxt_a_set_seqno;
1926         }
1927 }
1928
1929 static inline void
1930 logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
1931 {
1932         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1933         engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1934         init_waitqueue_head(&engine->irq_queue);
1935 }
1936
1937 static int
1938 lrc_setup_hws(struct intel_engine_cs *engine,
1939               struct drm_i915_gem_object *dctx_obj)
1940 {
1941         void *hws;
1942
1943         /* The HWSP is part of the default context object in LRC mode. */
1944         engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
1945                                        LRC_PPHWSP_PN * PAGE_SIZE;
1946         hws = i915_gem_object_pin_map(dctx_obj);
1947         if (IS_ERR(hws))
1948                 return PTR_ERR(hws);
1949         engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
1950         engine->status_page.obj = dctx_obj;
1951
1952         return 0;
1953 }
1954
1955 static const struct logical_ring_info {
1956         const char *name;
1957         unsigned exec_id;
1958         unsigned guc_id;
1959         u32 mmio_base;
1960         unsigned irq_shift;
1961 } logical_rings[] = {
1962         [RCS] = {
1963                 .name = "render ring",
1964                 .exec_id = I915_EXEC_RENDER,
1965                 .guc_id = GUC_RENDER_ENGINE,
1966                 .mmio_base = RENDER_RING_BASE,
1967                 .irq_shift = GEN8_RCS_IRQ_SHIFT,
1968         },
1969         [BCS] = {
1970                 .name = "blitter ring",
1971                 .exec_id = I915_EXEC_BLT,
1972                 .guc_id = GUC_BLITTER_ENGINE,
1973                 .mmio_base = BLT_RING_BASE,
1974                 .irq_shift = GEN8_BCS_IRQ_SHIFT,
1975         },
1976         [VCS] = {
1977                 .name = "bsd ring",
1978                 .exec_id = I915_EXEC_BSD,
1979                 .guc_id = GUC_VIDEO_ENGINE,
1980                 .mmio_base = GEN6_BSD_RING_BASE,
1981                 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
1982         },
1983         [VCS2] = {
1984                 .name = "bsd2 ring",
1985                 .exec_id = I915_EXEC_BSD,
1986                 .guc_id = GUC_VIDEO_ENGINE2,
1987                 .mmio_base = GEN8_BSD2_RING_BASE,
1988                 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
1989         },
1990         [VECS] = {
1991                 .name = "video enhancement ring",
1992                 .exec_id = I915_EXEC_VEBOX,
1993                 .guc_id = GUC_VIDEOENHANCE_ENGINE,
1994                 .mmio_base = VEBOX_RING_BASE,
1995                 .irq_shift = GEN8_VECS_IRQ_SHIFT,
1996         },
1997 };
1998
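     /*
      * Per-engine software setup: copy the static data from
      * logical_rings[], work out which forcewake domains are needed to
      * access the ELSP/CSB registers, and initialise the engine lists,
      * execlist lock, irq tasklet and default vfuncs/irqs.
      */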
1999 static struct intel_engine_cs *
2000 logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
2001 {
2002         const struct logical_ring_info *info = &logical_rings[id];
2003         struct drm_i915_private *dev_priv = to_i915(dev);
2004         struct intel_engine_cs *engine = &dev_priv->engine[id];
2005         enum forcewake_domains fw_domains;
2006
2007         engine->id = id;
2008         engine->name = info->name;
2009         engine->exec_id = info->exec_id;
2010         engine->guc_id = info->guc_id;
2011         engine->mmio_base = info->mmio_base;
2012
2013         engine->i915 = dev_priv;
2014
2015         /* Intentionally left blank. */
2016         engine->buffer = NULL;
2017
2018         fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2019                                                     RING_ELSP(engine),
2020                                                     FW_REG_WRITE);
2021
2022         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2023                                                      RING_CONTEXT_STATUS_PTR(engine),
2024                                                      FW_REG_READ | FW_REG_WRITE);
2025
2026         fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2027                                                      RING_CONTEXT_STATUS_BUF_BASE(engine),
2028                                                      FW_REG_READ);
2029
2030         engine->fw_domains = fw_domains;
2031
2032         INIT_LIST_HEAD(&engine->active_list);
2033         INIT_LIST_HEAD(&engine->request_list);
2034         INIT_LIST_HEAD(&engine->buffers);
2035         INIT_LIST_HEAD(&engine->execlist_queue);
2036         spin_lock_init(&engine->execlist_lock);
2037
2038         tasklet_init(&engine->irq_tasklet,
2039                      intel_lrc_irq_handler, (unsigned long)engine);
2040
2041         logical_ring_init_platform_invariants(engine);
2042         logical_ring_default_vfuncs(engine);
2043         logical_ring_default_irqs(engine, info->irq_shift);
2044
2045         intel_engine_init_hangcheck(engine);
2046         i915_gem_batch_pool_init(dev, &engine->batch_pool);
2047
2048         return engine;
2049 }
2050
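     /*
      * Common bring-up for a logical ring: initialise the command parser,
      * allocate and pin the default (kernel) context for this engine and
      * set up its hardware status page.
      */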
2051 static int
2052 logical_ring_init(struct intel_engine_cs *engine)
2053 {
2054         struct intel_context *dctx = engine->i915->kernel_context;
2055         int ret;
2056
2057         ret = i915_cmd_parser_init_ring(engine);
2058         if (ret)
2059                 goto error;
2060
2061         ret = execlists_context_deferred_alloc(dctx, engine);
2062         if (ret)
2063                 goto error;
2064
2065         /* As this is the default context, always pin it */
2066         ret = intel_lr_context_pin(dctx, engine);
2067         if (ret) {
2068                 DRM_ERROR("Failed to pin context for %s: %d\n",
2069                           engine->name, ret);
2070                 goto error;
2071         }
2072
2073         /* And set up the hardware status page. */
2074         ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2075         if (ret) {
2076                 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2077                 goto error;
2078         }
2079
2080         return 0;
2081
2082 error:
2083         intel_logical_ring_cleanup(engine);
2084         return ret;
2085 }
2086
2087 static int logical_render_ring_init(struct drm_device *dev)
2088 {
2089         struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
2090         int ret;
2091
2092         if (HAS_L3_DPF(dev))
2093                 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2094
2095         /* Override some for render ring. */
2096         if (INTEL_INFO(dev)->gen >= 9)
2097                 engine->init_hw = gen9_init_render_ring;
2098         else
2099                 engine->init_hw = gen8_init_render_ring;
2100         engine->init_context = gen8_init_rcs_context;
2101         engine->cleanup = intel_fini_pipe_control;
2102         engine->emit_flush = gen8_emit_flush_render;
2103         engine->emit_request = gen8_emit_request_render;
2104
2105         ret = intel_init_pipe_control(engine);
2106         if (ret)
2107                 return ret;
2108
2109         ret = intel_init_workaround_bb(engine);
2110         if (ret) {
2111                 /*
2112                  * We continue even if we fail to initialize the WA batch
2113                  * because we only expect rare glitches, nothing
2114                  * critical enough to prevent us from using the GPU.
2115                  */
2116                 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2117                           ret);
2118         }
2119
2120         ret = logical_ring_init(engine);
2121         if (ret) {
2122                 lrc_destroy_wa_ctx_obj(engine);
2123         }
2124
2125         return ret;
2126 }
2127
2128 static int logical_bsd_ring_init(struct drm_device *dev)
2129 {
2130         struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
2131
2132         return logical_ring_init(engine);
2133 }
2134
2135 static int logical_bsd2_ring_init(struct drm_device *dev)
2136 {
2137         struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
2138
2139         return logical_ring_init(engine);
2140 }
2141
2142 static int logical_blt_ring_init(struct drm_device *dev)
2143 {
2144         struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
2145
2146         return logical_ring_init(engine);
2147 }
2148
2149 static int logical_vebox_ring_init(struct drm_device *dev)
2150 {
2151         struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
2152
2153         return logical_ring_init(engine);
2154 }
2155
2156 /**
2157  * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2158  * @dev: DRM device.
2159  *
2160  * This function initializes the engines for Execlists submission (the equivalent in the
2161  * legacy ringbuffer submission world would be i915_gem_init_engines). It does so only for
2162  * those engines that are present in the hardware.
2163  *
2164  * Return: non-zero if the initialization failed.
2165  */
2166 int intel_logical_rings_init(struct drm_device *dev)
2167 {
2168         struct drm_i915_private *dev_priv = dev->dev_private;
2169         int ret;
2170
2171         ret = logical_render_ring_init(dev);
2172         if (ret)
2173                 return ret;
2174
2175         if (HAS_BSD(dev)) {
2176                 ret = logical_bsd_ring_init(dev);
2177                 if (ret)
2178                         goto cleanup_render_ring;
2179         }
2180
2181         if (HAS_BLT(dev)) {
2182                 ret = logical_blt_ring_init(dev);
2183                 if (ret)
2184                         goto cleanup_bsd_ring;
2185         }
2186
2187         if (HAS_VEBOX(dev)) {
2188                 ret = logical_vebox_ring_init(dev);
2189                 if (ret)
2190                         goto cleanup_blt_ring;
2191         }
2192
2193         if (HAS_BSD2(dev)) {
2194                 ret = logical_bsd2_ring_init(dev);
2195                 if (ret)
2196                         goto cleanup_vebox_ring;
2197         }
2198
2199         return 0;
2200
2201 cleanup_vebox_ring:
2202         intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
2203 cleanup_blt_ring:
2204         intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
2205 cleanup_bsd_ring:
2206         intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
2207 cleanup_render_ring:
2208         intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
2209
2210         return ret;
2211 }
2212
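     /*
      * Build the value for the R_PWR_CLK_STATE register. Prior to gen9 no
      * explicit request is needed; on gen9+ request full enablement of the
      * slice/subslice/EU counts reported in the device info.
      */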
2213 static u32
2214 make_rpcs(struct drm_i915_private *dev_priv)
2215 {
2216         u32 rpcs = 0;
2217
2218         /*
2219          * No explicit RPCS request is needed to ensure full
2220          * slice/subslice/EU enablement prior to Gen9.
2221          */
2222         if (INTEL_GEN(dev_priv) < 9)
2223                 return 0;
2224
2225         /*
2226          * Starting in Gen9, render power gating can leave
2227          * slice/subslice/EU in a partially enabled state. We
2228          * must make an explicit request through RPCS for full
2229          * enablement.
2230          */
2231         if (INTEL_INFO(dev_priv)->has_slice_pg) {
2232                 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2233                 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
2234                         GEN8_RPCS_S_CNT_SHIFT;
2235                 rpcs |= GEN8_RPCS_ENABLE;
2236         }
2237
2238         if (INTEL_INFO(dev_priv)->has_subslice_pg) {
2239                 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2240                 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
2241                         GEN8_RPCS_SS_CNT_SHIFT;
2242                 rpcs |= GEN8_RPCS_ENABLE;
2243         }
2244
2245         if (INTEL_INFO(dev_priv)->has_eu_pg) {
2246                 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2247                         GEN8_RPCS_EU_MIN_SHIFT;
2248                 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2249                         GEN8_RPCS_EU_MAX_SHIFT;
2250                 rpcs |= GEN8_RPCS_ENABLE;
2251         }
2252
2253         return rpcs;
2254 }
2255
2256 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2257 {
2258         u32 indirect_ctx_offset;
2259
2260         switch (INTEL_GEN(engine->i915)) {
2261         default:
2262                 MISSING_CASE(INTEL_GEN(engine->i915));
2263                 /* fall through */
2264         case 9:
2265                 indirect_ctx_offset =
2266                         GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2267                 break;
2268         case 8:
2269                 indirect_ctx_offset =
2270                         GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2271                 break;
2272         }
2273
2274         return indirect_ctx_offset;
2275 }
2276
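     /*
      * Write the initial register state into the context image: the
      * MI_LOAD_REGISTER_IMM headers, ring registers, workaround batch
      * pointers (render engine only), PDP entries (or PML4 for 48-bit
      * PPGTT) and, for the render engine, the power clock state.
      */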
2277 static int
2278 populate_lr_context(struct intel_context *ctx,
2279                     struct drm_i915_gem_object *ctx_obj,
2280                     struct intel_engine_cs *engine,
2281                     struct intel_ringbuffer *ringbuf)
2282 {
2283         struct drm_i915_private *dev_priv = ctx->i915;
2284         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2285         void *vaddr;
2286         u32 *reg_state;
2287         int ret;
2288
2289         if (!ppgtt)
2290                 ppgtt = dev_priv->mm.aliasing_ppgtt;
2291
2292         ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2293         if (ret) {
2294                 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2295                 return ret;
2296         }
2297
2298         vaddr = i915_gem_object_pin_map(ctx_obj);
2299         if (IS_ERR(vaddr)) {
2300                 ret = PTR_ERR(vaddr);
2301                 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2302                 return ret;
2303         }
2304         ctx_obj->dirty = true;
2305
2306         /* The second page of the context object contains some fields which must
2307          * be set up prior to the first execution. */
2308         reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
2309
2310         /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2311          * commands followed by (reg, value) pairs. The values we are setting here are
2312          * only for the first context restore: on a subsequent save, the GPU will
2313          * recreate this batchbuffer with new values (including all the missing
2314          * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
2315         reg_state[CTX_LRI_HEADER_0] =
2316                 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2317         ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2318                        RING_CONTEXT_CONTROL(engine),
2319                        _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2320                                           CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2321                                           (HAS_RESOURCE_STREAMER(dev_priv) ?
2322                                             CTX_CTRL_RS_CTX_ENABLE : 0)));
2323         ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2324                        0);
2325         ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2326                        0);
2327         /* Ring buffer start address is not known until the buffer is pinned.
2328          * It is written to the context image in execlists_update_context()
2329          */
2330         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2331                        RING_START(engine->mmio_base), 0);
2332         ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2333                        RING_CTL(engine->mmio_base),
2334                        ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2335         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2336                        RING_BBADDR_UDW(engine->mmio_base), 0);
2337         ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2338                        RING_BBADDR(engine->mmio_base), 0);
2339         ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2340                        RING_BBSTATE(engine->mmio_base),
2341                        RING_BB_PPGTT);
2342         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2343                        RING_SBBADDR_UDW(engine->mmio_base), 0);
2344         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2345                        RING_SBBADDR(engine->mmio_base), 0);
2346         ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2347                        RING_SBBSTATE(engine->mmio_base), 0);
2348         if (engine->id == RCS) {
2349                 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2350                                RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2351                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2352                                RING_INDIRECT_CTX(engine->mmio_base), 0);
2353                 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2354                                RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2355                 if (engine->wa_ctx.obj) {
2356                         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2357                         uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2358
2359                         reg_state[CTX_RCS_INDIRECT_CTX+1] =
2360                                 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2361                                 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2362
2363                         reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2364                                 intel_lr_indirect_ctx_offset(engine) << 6;
2365
2366                         reg_state[CTX_BB_PER_CTX_PTR+1] =
2367                                 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2368                                 0x01;
2369                 }
2370         }
2371         reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2372         ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2373                        RING_CTX_TIMESTAMP(engine->mmio_base), 0);
2374         /* PDP values will be assigned later if needed */
2375         ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2376                        0);
2377         ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2378                        0);
2379         ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2380                        0);
2381         ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2382                        0);
2383         ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2384                        0);
2385         ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2386                        0);
2387         ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2388                        0);
2389         ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2390                        0);
2391
2392         if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2393                 /* 64b PPGTT (48bit canonical)
2394                  * PDP0_DESCRIPTOR contains the base address to PML4 and
2395                  * other PDP Descriptors are ignored.
2396                  */
2397                 ASSIGN_CTX_PML4(ppgtt, reg_state);
2398         } else {
2399                 /* 32b PPGTT
2400                  * PDP*_DESCRIPTOR contains the base address of the space supported.
2401                  * With dynamic page allocation, PDPs may not be allocated at
2402                  * this point. Point the unallocated PDPs to the scratch page.
2403                  */
2404                 execlists_update_context_pdps(ppgtt, reg_state);
2405         }
2406
2407         if (engine->id == RCS) {
2408                 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2409                 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2410                                make_rpcs(dev_priv));
2411         }
2412
2413         i915_gem_object_unpin_map(ctx_obj);
2414
2415         return 0;
2416 }
2417
2418 /**
2419  * intel_lr_context_free() - free the LRC specific bits of a context
2420  * @ctx: the LR context to free.
2421  *
2422  * The real context freeing is done in i915_gem_context_free: this only
2423  * takes care of the bits that are LRC related: the per-engine backing
2424  * objects and the logical ringbuffer.
2425  */
2426 void intel_lr_context_free(struct intel_context *ctx)
2427 {
2428         int i;
2429
2430         for (i = I915_NUM_ENGINES; --i >= 0; ) {
2431                 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
2432                 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
2433
2434                 if (!ctx_obj)
2435                         continue;
2436
2437                 WARN_ON(ctx->engine[i].pin_count);
2438                 intel_ringbuffer_free(ringbuf);
2439                 drm_gem_object_unreference(&ctx_obj->base);
2440         }
2441 }
2442
2443 /**
2444  * intel_lr_context_size() - return the size of the context for an engine
2445  * @engine: which engine to find the context size for
2446  *
2447  * Each engine may require a different amount of space for a context image,
2448  * so when allocating (or copying) an image, this function can be used to
2449  * find the right size for the specific engine.
2450  *
2451  * Return: size (in bytes) of an engine-specific context image
2452  *
2453  * Note: this size includes the HWSP, which is part of the context image
2454  * in LRC mode, but does not include the "shared data page" used with
2455  * GuC submission. The caller should account for this if using the GuC.
2456  */
2457 uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2458 {
2459         int ret = 0;
2460
2461         WARN_ON(INTEL_GEN(engine->i915) < 8);
2462
2463         switch (engine->id) {
2464         case RCS:
2465                 if (INTEL_GEN(engine->i915) >= 9)
2466                         ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2467                 else
2468                         ret = GEN8_LR_CONTEXT_RENDER_SIZE;
2469                 break;
2470         case VCS:
2471         case BCS:
2472         case VECS:
2473         case VCS2:
2474                 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2475                 break;
2476         }
2477
2478         return ret;
2479 }
2480
2481 /**
2482  * execlists_context_deferred_alloc() - create the LRC specific bits of a context
2483  * @ctx: LR context to create.
2484  * @engine: engine to be used with the context.
2485  *
2486  * This function can be called more than once, with different engines, if we plan
2487  * to use the context with them. The context backing objects and the ringbuffers
2488  * (especially the ringbuffer backing objects) suck a lot of memory up, and that's why
2489  * the creation is a deferred call: it's better to make sure first that we need to use
2490  * a given ring with the context.
2491  *
2492  * Return: non-zero on error.
2493  */
2494 static int execlists_context_deferred_alloc(struct intel_context *ctx,
2495                                             struct intel_engine_cs *engine)
2496 {
2497         struct drm_i915_gem_object *ctx_obj;
2498         uint32_t context_size;
2499         struct intel_ringbuffer *ringbuf;
2500         int ret;
2501
2502         WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
2503         WARN_ON(ctx->engine[engine->id].state);
2504
2505         context_size = round_up(intel_lr_context_size(engine), 4096);
2506
2507         /* One extra page as the shared data page between the driver and GuC */
2508         context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2509
2510         ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
2511         if (IS_ERR(ctx_obj)) {
2512                 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2513                 return PTR_ERR(ctx_obj);
2514         }
2515
2516         ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
2517         if (IS_ERR(ringbuf)) {
2518                 ret = PTR_ERR(ringbuf);
2519                 goto error_deref_obj;
2520         }
2521
2522         ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
2523         if (ret) {
2524                 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2525                 goto error_ringbuf;
2526         }
2527
2528         ctx->engine[engine->id].ringbuf = ringbuf;
2529         ctx->engine[engine->id].state = ctx_obj;
2530         ctx->engine[engine->id].initialised = engine->init_context == NULL;
2531
2532         return 0;
2533
2534 error_ringbuf:
2535         intel_ringbuffer_free(ringbuf);
2536 error_deref_obj:
2537         drm_gem_object_unreference(&ctx_obj->base);
2538         ctx->engine[engine->id].ringbuf = NULL;
2539         ctx->engine[engine->id].state = NULL;
2540         return ret;
2541 }
2542
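     /*
      * After a GPU reset, clear RING_HEAD/RING_TAIL in every engine's
      * context image and reset the software ringbuffer head/tail so that
      * each context restarts from the beginning of its ring.
      */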
2543 void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2544                             struct intel_context *ctx)
2545 {
2546         struct intel_engine_cs *engine;
2547
2548         for_each_engine(engine, dev_priv) {
2549                 struct drm_i915_gem_object *ctx_obj =
2550                                 ctx->engine[engine->id].state;
2551                 struct intel_ringbuffer *ringbuf =
2552                                 ctx->engine[engine->id].ringbuf;
2553                 void *vaddr;
2554                 uint32_t *reg_state;
2555
2556                 if (!ctx_obj)
2557                         continue;
2558
2559                 vaddr = i915_gem_object_pin_map(ctx_obj);
2560                 if (WARN_ON(IS_ERR(vaddr)))
2561                         continue;
2562
2563                 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
2564                 ctx_obj->dirty = true;
2565
2566                 reg_state[CTX_RING_HEAD+1] = 0;
2567                 reg_state[CTX_RING_TAIL+1] = 0;
2568
2569                 i915_gem_object_unpin_map(ctx_obj);
2570
2571                 ringbuf->head = 0;
2572                 ringbuf->tail = 0;
2573         }
2574 }