7f935e58e4b3796f43aec70edc43ea12fd25e0d3
[cascardo/linux.git] / drivers / gpu / drm / etnaviv / etnaviv_buffer.c
1 /*
2  * Copyright (C) 2014 Etnaviv Project
3  * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include "etnaviv_gpu.h"
19 #include "etnaviv_gem.h"
20 #include "etnaviv_mmu.h"
21
22 #include "common.xml.h"
23 #include "state.xml.h"
24 #include "cmdstream.xml.h"
25
26 /*
27  * Command Buffer helper:
28  */
29
30
31 static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
32 {
33         u32 *vaddr = (u32 *)buffer->vaddr;
34
35         BUG_ON(buffer->user_size >= buffer->size);
36
37         vaddr[buffer->user_size / 4] = data;
38         buffer->user_size += 4;
39 }
40
41 static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
42         u32 reg, u32 value)
43 {
44         u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
45
46         buffer->user_size = ALIGN(buffer->user_size, 8);
47
48         /* write a register via cmd stream */
49         OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
50                     VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
51                     VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
52         OUT(buffer, value);
53 }
54
55 static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
56 {
57         buffer->user_size = ALIGN(buffer->user_size, 8);
58
59         OUT(buffer, VIV_FE_END_HEADER_OP_END);
60 }
61
62 static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
63 {
64         buffer->user_size = ALIGN(buffer->user_size, 8);
65
66         OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
67 }
68
69 static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
70         u16 prefetch, u32 address)
71 {
72         buffer->user_size = ALIGN(buffer->user_size, 8);
73
74         OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
75                     VIV_FE_LINK_HEADER_PREFETCH(prefetch));
76         OUT(buffer, address);
77 }
78
79 static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
80         u32 from, u32 to)
81 {
82         buffer->user_size = ALIGN(buffer->user_size, 8);
83
84         OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
85         OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
86 }
87
88 static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
89 {
90         u32 flush;
91         u32 stall;
92
93         /*
94          * This assumes that if we're switching to 2D, we're switching
95          * away from 3D, and vice versa.  Hence, if we're switching to
96          * the 2D core, we need to flush the 3D depth and color caches,
97          * otherwise we need to flush the 2D pixel engine cache.
98          */
99         if (pipe == ETNA_PIPE_2D)
100                 flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
101         else
102                 flush = VIVS_GL_FLUSH_CACHE_PE2D;
103
104         stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
105                 VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
106
107         CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
108         CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
109
110         CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
111
112         CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
113                        VIVS_GL_PIPE_SELECT_PIPE(pipe));
114 }
115
/*
 * Return the GPU-visible address of @buf: the buffer's physical address
 * relative to the GPU's memory base.
 */
static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
{
	return buf->paddr - gpu->memory_base;
}
120
121 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
122         struct etnaviv_cmdbuf *buf, u32 off, u32 len)
123 {
124         u32 size = buf->size;
125         u32 *ptr = buf->vaddr + off;
126
127         dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
128                         ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
129
130         print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
131                         ptr, len * 4, 0);
132 }
133
134 /*
135  * Ensure that there is space in the command buffer to contiguously write
136  * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
137  */
138 static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
139         struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
140 {
141         if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
142                 buffer->user_size = 0;
143
144         return gpu_va(gpu, buffer) + buffer->user_size;
145 }
146
147 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
148 {
149         struct etnaviv_cmdbuf *buffer = gpu->buffer;
150
151         /* initialize buffer */
152         buffer->user_size = 0;
153
154         CMD_WAIT(buffer);
155         CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
156
157         return buffer->user_size / 8;
158 }
159
160 void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
161 {
162         struct etnaviv_cmdbuf *buffer = gpu->buffer;
163
164         /* Replace the last WAIT with an END */
165         buffer->user_size -= 16;
166
167         CMD_END(buffer);
168         mb();
169 }
170
/*
 * Queue a user command buffer for execution behind the ring buffer's
 * active WAIT/LINK loop.
 *
 * The sequence appended to the ring is:
 *   [optional MMU flush / pipe switch + LINK to cmdbuf] EVENT WAIT LINK
 * and the currently-live WAIT (which the FE is spinning on) is then
 * patched into a LINK pointing at the new commands.  The patch order —
 * address word first, barrier, then opcode word — is what makes the
 * handover safe while the FE is running, so statement order here must
 * not be changed.
 *
 * @gpu:    GPU owning the ring buffer
 * @event:  event id signalled (via VIVS_GL_EVENT) when the buffer completes
 * @cmdbuf: user command buffer to execute
 */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	/* CPU address of the live WAIT/LINK pair (16 bytes) we patch later */
	u32 *lw = buffer->vaddr + buffer->user_size - 16;
	u32 back, link_target, link_size, reserve_size, extra_size = 0;

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	/*
	 * If we need to flush the MMU prior to submitting this buffer, we
	 * will need to append a mmu flush load state, followed by a new
	 * link to this buffer - a total of four additional words.
	 */
	if (gpu->mmu->need_flush || gpu->switch_context) {
		/* link command */
		extra_size += 2;
		/* flush command */
		if (gpu->mmu->need_flush)
			extra_size += 2;
		/* pipe switch commands */
		if (gpu->switch_context)
			extra_size += 8;
	}

	/* 6 fixed words (EVENT + WAIT + LINK) plus any extras, in bytes */
	reserve_size = (6 + extra_size) * 4;

	link_target = etnaviv_buffer_reserve(gpu, buffer, reserve_size / 8);

	/* save offset back into main buffer */
	back = buffer->user_size + reserve_size - 6 * 4;
	link_size = 6;

	/* Skip over any extra instructions */
	link_target += extra_size * sizeof(u32);

	if (drm_debug & DRM_UT_DRIVER)
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);

	/* jump back from cmd to main buffer */
	CMD_LINK(cmdbuf, link_size, link_target);

	/* by default the FE is redirected straight into the user cmdbuf */
	link_target = gpu_va(gpu, cmdbuf);
	link_size = cmdbuf->size / 8;

	if (drm_debug & DRM_UT_DRIVER) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", lw);
		pr_info("link addr: %p\n", lw + 1);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
		pr_info("event: %d\n", event);
	}

	if (gpu->mmu->need_flush || gpu->switch_context) {
		/* the extra commands start at the current write position */
		u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;

		if (gpu->mmu->need_flush) {
			/* Add the MMU flush */
			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
				       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
				       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
				       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);

			gpu->mmu->need_flush = false;
		}

		if (gpu->switch_context) {
			etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
			gpu->switch_context = false;
		}

		/* And the link to the first buffer */
		CMD_LINK(buffer, link_size, link_target);

		/* Update the link target to point to above instructions */
		link_target = new_target;
		link_size = extra_size;
	}

	/* trigger event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* append WAIT/LINK to main buffer */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));

	/* Change WAIT into a LINK command; write the address first. */
	*(lw + 1) = link_target;
	mb();
	*(lw) = VIV_FE_LINK_HEADER_OP_LINK |
		VIV_FE_LINK_HEADER_PREFETCH(link_size);
	/* make sure the opcode lands after the address is visible */
	mb();

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}