/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

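/*
 * Per-device FIFO state.  The runlist is double-buffered between two
 * GPU objects, and "user.mem" holds one 0x1000-byte USERD page per
 * channel (128 channels), mapped through BAR1 at "user.bar".
 */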
struct gf100_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct {
		struct nvkm_gpuobj *mem[2];
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

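/*
 * Rebuild the runlist from scratch into the inactive buffer, then point
 * the hardware at it.  Each entry is two words: the channel ID, and an
 * unknown value (0x00000004).  Completion appears to be signalled by
 * the runlist interrupt, which wakes runlist.wait.
 */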
static void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	for (i = 0, p = 0; i < 128; i++) {
		struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING) {
			nv_wo32(cur, p + 0, i);
			nv_wo32(cur, p + 4, 0x00000004);
			p += 8;
		}
	}
	bar->flush(bar);

	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nv_error(fifo, "runlist update timeout\n");
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

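/*
 * Map an engine's context object into the channel's VM, and write its
 * address into the channel's instance block at the per-engine offset,
 * so the hardware can locate it on context switch.
 */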
static int
gf100_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}

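/*
 * Before unbinding an engine context the channel is "kicked" off the
 * hardware via register 0x002634.  If the kick never completes, we can
 * only fail a suspend; on a normal teardown we proceed regardless.
 */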
static int
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)parent->engine;
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct gf100_fifo_chan *chan = (void *)parent;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_bar *bar = device->bar;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (!nv_wait(fifo, 0x002634, 0xffffffff, chan->base.chid)) {
		nv_error(fifo, "channel %d [%s] kick timeout\n",
			 chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nv_wo32(base, addr + 0x00, 0x00000000);
	nv_wo32(base, addr + 0x04, 0x00000000);
	bar->flush(bar);
	return 0;
}

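/*
 * GPFIFO channel creation: allocate the channel, clear its USERD page,
 * and fill in the channel-state fields of the instance block (USERD
 * address, GPFIFO base/size, plus a number of unexplained magic values).
 */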
static int
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo *fifo = (void *)engine;
	struct gf100_fifo_base *base = (void *)parent;
	struct gf100_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				 "ioffset %016llx ilength %08x\n",
			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			 args->v0.ilength);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x1000,
				       args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_CE1) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
	nv_parent(chan)->context_detach = gf100_fifo_context_detach;

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	for (i = 0; i < 0x1000; i += 4)
		nv_wo32(fifo->user.mem, usermem + i, 0x00000000);

	nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x54, 0x00000002);
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xa4, 0x1f1f1f1f);
	nv_wo32(base, 0xa8, 0x1f1f1f1f);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}

static int
gf100_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
		gf100_fifo_runlist_update(fifo);
	}

	return 0;
}

static void gf100_fifo_intr_engine(struct gf100_fifo *fifo);

static int
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
		gf100_fifo_runlist_update(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
gf100_fifo_ofuncs = {
	.ctor = gf100_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gf100_fifo_chan_init,
	.fini = gf100_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gf100_fifo_sclass[] = {
	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

static int
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gf100_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				       NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gf100_fifo_context_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gf100_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_context_ctor,
		.dtor = gf100_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

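/*
 * Translate between NVDEV engine indices and what appears to be the
 * scheduler's internal 0..5 engine numbering, in both directions.
 */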
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_engine(fifo, engn);
}

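/*
 * Deferred engine recovery: with the failing engines latched in
 * fifo->mask by gf100_fifo_recover(), stall them via 0x002630, cycle
 * each one through fini()/init(), then rebuild the runlist and unstall.
 */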
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nv_error(fifo, "%s engine fault on channel %d, recovering...\n",
		 nv_subdev(engine)->name, chid);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

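/*
 * Attempt to execute a software method: look up the channel's bound
 * software-class (0x906e) object and forward the method to it.
 * Returns 0 if the method was handled.
 */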
static int
gf100_fifo_swmthd(struct gf100_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gf100_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gf100_fifo_engine(fifo, engn)))
				continue;
			gf100_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;
	char enunk[6] = "";

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);
	if (!en)
		snprintf(enunk, sizeof(enunk), "UNK%02x", code);

	nv_error(fifo, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

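/*
 * Decode an MMU fault: read the faulting instance, virtual address and
 * status for the given fault unit, translate the reason/engine/client
 * fields through the tables above, and if the fault can be pinned on a
 * GPFIFO channel, trigger recovery on it.
 */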
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char erunk[6] = "";
	char euunk[6] = "";
	char ecunk[6] = "";
	char gpcid[3] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	if (!er)
		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);

	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	} else {
		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
	}

	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
	}

	if (!ec)
		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);

	nv_error(fifo, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
		 (u64)vahi << 32 | valo, er ? er->name : erunk,
		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
		 ec ? ec->name : ecunk, (u64)inst << 12,
		 nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case FERMI_CHANNEL_GPFIFO:
			gf100_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

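/*
 * PBDMA interrupt: an EMPTY_SUBC (0x00800000) report may just be a
 * software method, which we try to execute before reporting anything;
 * whatever remains is logged, and the interrupt is acknowledged
 * unconditionally.
 */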
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	if (stat & 0x00800000) {
		if (!gf100_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nv_error(fifo, "PBDMA%d:", unit);
		nvkm_bitfield_print(gf100_fifo_pbdma_intr, show);
		pr_cont("\n");
		nv_error(fifo,
			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 unit, chid,
			 nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			 subc, mthd, data);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nv_error(fifo, "RUNLIST 0x%08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nv_error(fifo, "ENGINE %d %d %01x", engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

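/*
 * Top-level interrupt dispatch.  Each handled bit of 0x002100 is
 * cleared from "stat" as it is serviced; anything left over is
 * reported and then masked off so it can't storm.
 */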
static void
gf100_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nv_warn(fifo, "INTR 0x00000001: 0x%08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nv_warn(fifo, "INTR 0x00010000: 0x%08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nv_warn(fifo, "INTR 0x01000000: 0x%08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nv_error(fifo, "INTR 0x%08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gf100_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gf100_fifo_uevent_init,
	.fini = gf100_fifo_uevent_fini,
};

static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gf100_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 0x1000, 0x1000, 0,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gf100_fifo_intr;
	nv_engine(fifo)->cclass = &gf100_fifo_cclass;
	nv_engine(fifo)->sclass = gf100_fifo_sclass;
	return 0;
}

static void
gf100_fifo_dtor(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[0]);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[1]);

	nvkm_fifo_destroy(&fifo->base);
}

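/*
 * Bring the FIFO up: enable all PBDMA units, count how many the
 * hardware actually reports, route engines to them, and unmask
 * interrupts.
 */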
static int
gf100_fifo_init(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nv_debug(fifo, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}

struct nvkm_oclass *
gf100_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_ctor,
		.dtor = gf100_fifo_dtor,
		.init = gf100_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};