2 * Copyright 2013 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
28 #include <core/client.h>
29 #include <core/notify.h>
30 #include <core/oproxy.h>
31 #include <subdev/bios.h>
32 #include <subdev/bios/dcb.h>
34 #include <nvif/class.h>
35 #include <nvif/cl0046.h>
36 #include <nvif/event.h>
37 #include <nvif/unpack.h>
/* nvkm_event fini hook for the vblank event: disable vblank interrupt
 * delivery for @head by forwarding to the hardware-specific callback.
 * NOTE(review): this chunk has lines elided (return type line and closing
 * brace are not visible) — confirm against the full file.
 */
40 nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
42 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
43 disp->func->head.vblank_fini(disp, head);
/* nvkm_event init hook for the vblank event: enable vblank interrupt
 * delivery for @head via the hardware-specific callback.
 * NOTE(review): return type line and closing brace elided in this chunk.
 */
47 nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
49 struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
50 disp->func->head.vblank_init(disp, head);
/* nvkm_event ctor hook: validate a userspace vblank-notify request.
 * Unpacks an nvif_notify_head_req_v0 from @data and, on success, fills
 * in the notify reply size and index (the requested head).
 * NOTE(review): `req->v0.head <= disp->vblank.index_nr` permits
 * head == index_nr, which looks one past the last valid head (the event
 * is initialised with index_nr == number of heads) — confirm whether
 * `<` is intended upstream before relying on this bound.
 * NOTE(review): several lines (return type, union/req declarations,
 * success return, closing braces) are elided in this chunk.
 */
54 nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
55 struct nvkm_notify *notify)
57 struct nvkm_disp *disp =
58 container_of(notify->event, typeof(*disp), vblank);
60 struct nvif_notify_head_req_v0 v0;
64 if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
65 notify->size = sizeof(struct nvif_notify_head_rep_v0);
66 if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
68 notify->index = req->v0.head;
/* Event ops for the per-head vblank notify event. */
76 static const struct nvkm_event_func
77 nvkm_disp_vblank_func = {
78 .ctor = nvkm_disp_vblank_ctor,
79 .init = nvkm_disp_vblank_init,
80 .fini = nvkm_disp_vblank_fini,
/* Broadcast a vblank event for @head to all registered listeners.
 * Called from the hardware interrupt path; the reply payload is an
 * empty nvif_notify_head_rep_v0.
 * NOTE(review): return type line and closing brace elided in this chunk.
 */
84 nvkm_disp_vblank(struct nvkm_disp *disp, int head)
86 struct nvif_notify_head_rep_v0 rep = {};
87 nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
/* nvkm_event ctor hook: validate a userspace hotplug (connector) notify
 * request.  Unpacks an nvif_notify_conn_req_v0, then walks the output
 * list looking for an output whose connector index matches the request;
 * the connector must also have a hotplug event source (hpd.event),
 * otherwise -ENODEV.  -ENXIO if no output matches the connector index.
 * NOTE(review): lines elided in this chunk (return type, union/req
 * declarations, the success assignments/return, closing braces) — the
 * visible comma-operator `if (ret = -ENXIO, ...)` pattern sets the error
 * code that is returned if the condition fails on every iteration.
 */
91 nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
92 struct nvkm_notify *notify)
94 struct nvkm_disp *disp =
95 container_of(notify->event, typeof(*disp), hpd);
97 struct nvif_notify_conn_req_v0 v0;
99 struct nvkm_output *outp;
102 if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
103 notify->size = sizeof(struct nvif_notify_conn_rep_v0);
104 list_for_each_entry(outp, &disp->outp, head) {
105 if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
106 if (ret = -ENODEV, outp->conn->hpd.event) {
107 notify->types = req->v0.mask;
108 notify->index = req->v0.conn;
/* Event ops for the per-connector hotplug notify event.  Only .ctor is
 * set here; init/fini are presumably handled by the connector's own hpd
 * event source — confirm against the full file.
 */
119 static const struct nvkm_event_func
120 nvkm_disp_hpd_func = {
121 .ctor = nvkm_disp_hpd_ctor
/* Map a userspace notify type (NV04_DISP_NTFY_*) to the corresponding
 * nvkm_event of this display engine.  VBLANK -> disp->vblank; CONN is
 * presumably disp->hpd (that case's body and the switch/default/return
 * lines are elided in this chunk — confirm against the full file).
 */
125 nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
127 struct nvkm_disp *disp = nvkm_disp(object->engine);
129 case NV04_DISP_NTFY_VBLANK:
130 *event = &disp->vblank;
132 case NV04_DISP_NTFY_CONN:
/* oproxy destructor hook: if this proxy is the display engine's current
 * exclusive client, release the claim (the body of the `if` — presumably
 * `disp->client = NULL;` — is elided in this chunk).  Serialised by the
 * engine subdev mutex, matching nvkm_disp_class_new().
 */
142 nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
144 struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
145 mutex_lock(&disp->engine.subdev.mutex);
146 if (disp->client == oproxy)
148 mutex_unlock(&disp->engine.subdev.mutex);
/* oproxy ops for the exclusive display-class proxy; dtor[1] runs after
 * the proxied object is destroyed, releasing the client claim.
 * NOTE(review): the struct's name line and closing `};` are elided here.
 */
151 static const struct nvkm_oproxy_func
153 .dtor[1] = nvkm_disp_class_del,
/* Instantiate the display root class behind an oproxy, enforcing a
 * single exclusive client: under the subdev mutex, claim disp->client
 * for this proxy (the already-claimed early-exit between the first
 * lock/unlock pair is elided in this chunk — presumably returns -EBUSY),
 * then construct the real object via the root class ctor.
 * NOTE(review): error-check lines after nvkm_oproxy_new_ and the opening
 * brace are also elided — confirm against the full file.
 */
157 nvkm_disp_class_new(struct nvkm_device *device,
158 const struct nvkm_oclass *oclass, void *data, u32 size,
159 struct nvkm_object **pobject)
161 const struct nvkm_disp_oclass *sclass = oclass->engn;
162 struct nvkm_disp *disp = nvkm_disp(oclass->engine);
163 struct nvkm_oproxy *oproxy;
166 ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
169 *pobject = &oproxy->base;
171 mutex_lock(&disp->engine.subdev.mutex);
173 mutex_unlock(&disp->engine.subdev.mutex);
176 disp->client = oproxy;
177 mutex_unlock(&disp->engine.subdev.mutex);
179 return sclass->ctor(disp, oclass, data, size, &oproxy->object);
/* Device-level oclass used to expose the display root class; creation
 * goes through nvkm_disp_class_new() for exclusive-client enforcement.
 * NOTE(review): name line and closing `};` elided in this chunk.
 */
182 static const struct nvkm_device_oclass
184 .ctor = nvkm_disp_class_new,
/* Engine sclass enumeration hook: report the chipset-specific display
 * root class (queried from disp->func->root) as the engine's single
 * exposed class.  The index check / return lines are elided in this
 * chunk — presumably index 0 only; confirm against the full file.
 */
188 nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
189 const struct nvkm_device_oclass **class)
191 struct nvkm_disp *disp = nvkm_disp(oclass->engine);
193 const struct nvkm_disp_oclass *root = disp->func->root(disp);
194 oclass->base = root->base;
196 *class = &nvkm_disp_sclass;
/* Engine interrupt handler: forward to the chipset-specific handler.
 * NOTE(review): return type line and closing brace elided in this chunk.
 */
203 nvkm_disp_intr(struct nvkm_engine *engine)
205 struct nvkm_disp *disp = nvkm_disp(engine);
206 disp->func->intr(disp);
/* Engine fini (shutdown/suspend): quiesce all output paths first, then
 * all connectors.  Note the order is the reverse of nvkm_disp_init().
 * NOTE(review): return type, chipset fini call (if any), loop closing
 * braces and return are elided in this chunk.
 */
210 nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
212 struct nvkm_disp *disp = nvkm_disp(engine);
213 struct nvkm_connector *conn;
214 struct nvkm_output *outp;
216 list_for_each_entry(outp, &disp->outp, head) {
217 nvkm_output_fini(outp);
220 list_for_each_entry(conn, &disp->conn, head) {
221 nvkm_connector_fini(conn);
/* Engine init (startup/resume): bring up connectors first, then output
 * paths — the mirror image of nvkm_disp_fini()'s teardown order.
 * NOTE(review): return type, loop closing braces and return elided in
 * this chunk.
 */
228 nvkm_disp_init(struct nvkm_engine *engine)
230 struct nvkm_disp *disp = nvkm_disp(engine);
231 struct nvkm_connector *conn;
232 struct nvkm_output *outp;
234 list_for_each_entry(conn, &disp->conn, head) {
235 nvkm_connector_init(conn);
238 list_for_each_entry(outp, &disp->outp, head) {
239 nvkm_output_init(outp);
/* Engine destructor: give the chipset layer first crack (its dtor may
 * return the allocation to hand back to the caller — `data` is declared
 * on an elided line), tear down both event sources, then drain and free
 * the output and connector lists.  Outputs are deleted before connectors
 * since each output holds a reference to its connector.
 * NOTE(review): return type, `void *data` declaration, loop closing
 * braces and final return are elided in this chunk.
 */
246 nvkm_disp_dtor(struct nvkm_engine *engine)
248 struct nvkm_disp *disp = nvkm_disp(engine);
249 struct nvkm_connector *conn;
250 struct nvkm_output *outp;
253 if (disp->func->dtor)
254 data = disp->func->dtor(disp);
256 nvkm_event_fini(&disp->vblank);
257 nvkm_event_fini(&disp->hpd);
259 while (!list_empty(&disp->outp)) {
260 outp = list_first_entry(&disp->outp, typeof(*outp), head);
261 list_del(&outp->head);
262 nvkm_output_del(&outp);
265 while (!list_empty(&disp->conn)) {
266 conn = list_first_entry(&disp->conn, typeof(*conn), head);
267 list_del(&conn->head);
268 nvkm_connector_del(&conn);
/* Engine ops vtable wiring the lifecycle hooks above into nvkm_engine.
 * NOTE(review): the table's name line and closing `};` are elided here.
 */
274 static const struct nvkm_engine_func
276 .dtor = nvkm_disp_dtor,
277 .init = nvkm_disp_init,
278 .fini = nvkm_disp_fini,
279 .intr = nvkm_disp_intr,
280 .base.sclass = nvkm_disp_class_get,
/* One-time constructor for the common display engine state.
 *
 * Phases (several connective lines — error checks, braces, continue/
 * break statements, `i`/`ret`/`data` declarations — are elided in this
 * chunk; confirm details against the full file):
 *   1. Base engine ctor, list-head init, record head count.
 *   2. Walk the VBIOS DCB table and create an nvkm_output for each
 *      usable display path, dispatching on location (internal/external)
 *      and type (CRT/TV/TMDS/LVDS/DP) to the chipset ctor table.
 *      -ENODEV from a ctor is a soft "not supported" skip.  `hpd`
 *      accumulates the highest connector id + 1 for event sizing.
 *   3. Create nvkm_connector objects: prefer VBIOS connector-table data;
 *      otherwise fall back to the ccb-index heuristic (paths sharing an
 *      i2c index share a connector).  Connectors already created (or
 *      shared with another path) are reused rather than duplicated; on
 *      ctor failure the owning output is torn down and skipped.
 *   4. Initialise the hotplug event (3 types, `hpd` indices) and the
 *      vblank event (1 type, one index per head).
 */
284 nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
285 int index, int heads, struct nvkm_disp *disp)
287 struct nvkm_bios *bios = device->bios;
288 struct nvkm_output *outp, *outt, *pair;
289 struct nvkm_connector *conn;
290 struct nvbios_connE connE;
291 struct dcb_output dcbE;
292 u8 hpd = 0, ver, hdr;
296 INIT_LIST_HEAD(&disp->outp);
297 INIT_LIST_HEAD(&disp->conn);
299 disp->head.nr = heads;
301 ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
302 true, &disp->engine);
306 /* create output objects for each display path in the vbios */
308 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
309 const struct nvkm_disp_func_outp *outps;
310 int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
311 struct nvkm_output **);
313 if (dcbE.type == DCB_OUTPUT_UNUSED)
315 if (dcbE.type == DCB_OUTPUT_EOL)
319 switch (dcbE.location) {
320 case 0: outps = &disp->func->outp.internal; break;
321 case 1: outps = &disp->func->outp.external; break;
323 nvkm_warn(&disp->engine.subdev,
324 "dcb %d locn %d unknown\n", i, dcbE.location);
329 case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
330 case DCB_OUTPUT_TV : ctor = outps->tv ; break;
331 case DCB_OUTPUT_TMDS : ctor = outps->tmds; break;
332 case DCB_OUTPUT_LVDS : ctor = outps->lvds; break;
333 case DCB_OUTPUT_DP : ctor = outps->dp ; break;
335 nvkm_warn(&disp->engine.subdev,
336 "dcb %d type %d unknown\n", i, dcbE.type);
341 ret = ctor(disp, i, &dcbE, &outp);
346 if (ret == -ENODEV) {
347 nvkm_debug(&disp->engine.subdev,
348 "dcb %d %d/%d not supported\n",
349 i, dcbE.location, dcbE.type);
352 nvkm_error(&disp->engine.subdev,
353 "failed to create output %d\n", i);
354 nvkm_output_del(&outp);
358 list_add_tail(&outp->head, &disp->outp);
359 hpd = max(hpd, (u8)(dcbE.connector + 1));
362 /* create connector objects based on the outputs we support */
363 list_for_each_entry_safe(outp, outt, &disp->outp, head) {
364 /* bios data *should* give us the most useful information */
365 data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
368 /* no bios connector data... */
370 /* heuristic: anything with the same ccb index is
371 * considered to be on the same connector, any
372 * output path without an associated ccb entry will
373 * be put on its own connector
375 int ccb_index = outp->info.i2c_index;
376 if (ccb_index != 0xf) {
377 list_for_each_entry(pair, &disp->outp, head) {
378 if (pair->info.i2c_index == ccb_index) {
379 outp->conn = pair->conn;
385 /* connector shared with another output path */
389 memset(&connE, 0x00, sizeof(connE));
390 connE.type = DCB_CONNECTOR_NONE;
393 i = outp->info.connector;
396 /* check that we haven't already created this connector */
397 list_for_each_entry(conn, &disp->conn, head) {
398 if (conn->index == outp->info.connector) {
407 /* apparently we need to create a new one! */
408 ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
410 nvkm_error(&disp->engine.subdev,
411 "failed to create output %d conn: %d\n",
413 nvkm_connector_del(&outp->conn);
414 list_del(&outp->head);
415 nvkm_output_del(&outp);
419 list_add_tail(&outp->conn->head, &disp->conn);
422 ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
426 ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
/* Allocate a zeroed nvkm_disp and run the common constructor on it.
 * NOTE(review): return type line, opening brace, the -ENOMEM return for
 * the failed-kzalloc branch, and closing brace are elided in this chunk.
 */
434 nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
435 int index, int heads, struct nvkm_disp **pdisp)
437 if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
439 return nvkm_disp_ctor(func, device, index, heads, *pdisp);