net/sunrpc/xprtrdma/svc_rdma_sendto.c
/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

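/* Number of pad bytes needed to round @len up to the next XDR
 * four-byte boundary (0-3 bytes).
 */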
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
		     struct svc_rdma_req_map *vec,
		     bool write_chunk_present)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		unsigned char *base = xdr->tail[0].iov_base;
		size_t len = xdr->tail[0].iov_len;
		u32 xdr_pad = xdr_padsize(xdr->page_len);

		if (write_chunk_present && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		if (len) {
			vec->sge[sge_no].iov_base = base;
			vec->sge[sge_no].iov_len = len;
			sge_no++;
		}
	}

	dprintk("svcrdma: %s: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		__func__, sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

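/* DMA-map a portion of the response xdr_buf: locate the page that
 * backs xdr_off in the head, page list, or tail, and map at most one
 * page's worth of data starting at that offset.
 */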
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Returns the address of the first read chunk, or NULL if no read
 * chunk is present.
 */
struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == xdr_zero)
		return NULL;
	return ch;
}

/* Returns the address of the first write array element, or NULL if
 * no write list is present.
 */
static struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
		return NULL;
	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}

/* Returns the address of the first reply array element, or NULL if no
 * reply array is present.
 */
static struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
			 struct rpcrdma_write_array *wr_ary)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply chunk may occur with read list
	 *	and/or write list.
	 */
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim != xdr_zero)
			rch++;

		/* The reply chunk follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
		goto found_it;
	}

	if (wr_ary) {
		int chunk = be32_to_cpu(wr_ary->wc_nchunks);

		rp_ary = (struct rpcrdma_write_array *)
			 &wr_ary->wc_array[chunk].wc_target.rs_length;
		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];

 found_it:
	if (rp_ary->wc_discrim == xdr_zero)
		return NULL;
	return rp_ary;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply. Picks the
 * first rkey it finds in the chunk lists.
 *
 * Returns zero if the RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
				 struct rpcrdma_write_array *wr_ary,
				 struct rpcrdma_write_array *rp_ary)
{
	struct rpcrdma_read_chunk *rd_ary;
	struct rpcrdma_segment *arg_ch;
	u32 inv_rkey;

	inv_rkey = 0;

	rd_ary = svc_rdma_get_read_chunk(rdma_argp);
	if (rd_ary) {
		inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle);
		goto out;
	}

	if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
		arg_ch = &wr_ary->wc_array[0].wc_target;
		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
		goto out;
	}

	if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
		arg_ch = &rp_ary->wc_array[0].wc_target;
		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
		goto out;
	}

out:
	dprintk("svcrdma: Send With Invalidate rkey=%08x\n", inv_rkey);
	return inv_rkey;
}

/* Assumptions:
 * - The specified write_len can be covered by sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
			  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(xprt, ctxt);
		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->cqe.done = svc_rdma_wc_write;
	write_wr.wr.wr_cqe = &ctxt->cqe;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
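	/* Return the number of payload bytes posted in this Write; the
	 * caller re-invokes send_write() until the chunk is complete.
	 */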
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return -EIO;
}

noinline
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *res_ary;
	int ret;

	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
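		/* A single chunk may need several RDMA Writes if its
		 * payload spans more SGEs than the device allows in one
		 * Write WR; send_write() reports how much was posted.
		 */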
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len;

out_err:
	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
	return -EIO;
}

noinline
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *res_ary;
	int ret;

	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &rp_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;

out_err:
	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
	return -EIO;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function maps the RPCRDMA header passed in
 * 'rdma_resp' into sge[0], and 'byte_count' indicates how much of the
 * XDR to include in this RDMA_SEND. The payload to send always starts
 * at offset zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_req_map *vec,
		      int byte_count,
		      u32 inv_rkey)
{
	struct svc_rdma_op_ctxt *ctxt;
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret = -EIO;

	/* Prepare the context */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	svc_rdma_count_mappings(rdma, ctxt);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(rdma, ctxt);
		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr.wr_cqe = &ctxt->cqe;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	if (inv_rkey) {
		send_wr.opcode = IB_WR_SEND_WITH_INV;
		send_wr.ex.invalidate_rkey = inv_rkey;
	} else
		send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return ret;
}

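/* The RPC-over-RDMA transport header is constructed later, in
 * svc_rdma_sendto(), so there is nothing to prepend here.
 */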
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *wr_ary, *rp_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_req_map *vec;
	u32 inv_rkey;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);
	wr_ary = svc_rdma_get_write_array(rdma_argp);
	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);

	inv_rkey = 0;
	if (rdma->sc_snd_w_inv)
		inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_ary, rp_ary);

	/* Build a req vec for the XDR */
	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	ret = -ENOMEM;
	res_page = alloc_page(GFP_KERNEL);
	if (!res_page)
		goto err0;
	rdma_resp = page_address(res_page);
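	/* When a reply chunk is present, the entire RPC reply is pushed
	 * to the client via RDMA Write and only the transport header is
	 * sent inline (RDMA_NOMSG); otherwise the reply follows the
	 * header in the Send (RDMA_MSG).
	 */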
	if (rp_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	if (wr_ary) {
		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
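		/* Data moved via the write list, and its XDR pad, no
		 * longer needs to be sent inline.
		 */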
		inline_bytes -= ret + xdr_padsize(ret);
	}

	/* Send any reply-list data and update resp reply-list */
	if (rp_ary) {
		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	/* Post a fresh Receive buffer _before_ sending the reply */
	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
	if (ret)
		goto err1;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
			 inline_bytes, inv_rkey);
	if (ret < 0)
		goto err0;

	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(rdma, vec);
	pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
	       ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	return -ENOTCONN;
}

void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 int status)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	enum rpcrdma_errcode err;
	__be32 *va;
	int length;
	int ret;

	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
	if (ret)
		return;

	p = alloc_page(GFP_KERNEL);
	if (!p)
		return;
	va = page_address(p);

	/* XDR encode an error reply */
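	/* ERR_VERS reports an unsupported RPC-over-RDMA version;
	 * all other failures are reported as ERR_CHUNK.
	 */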
	err = ERR_CHUNK;
	if (status == -EPROTONOSUPPORT)
		err = ERR_VERS;
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = length;
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		dprintk("svcrdma: Error mapping buffer for protocol error\n");
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	svc_rdma_count_mappings(xprt, ctxt);

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof(err_wr));
	ctxt->cqe.done = svc_rdma_wc_send;
	err_wr.wr_cqe = &ctxt->cqe;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}