svcrdma: Make RDMA_ERROR messages work
authorChuck Lever <chuck.lever@oracle.com>
Tue, 1 Mar 2016 18:06:38 +0000 (13:06 -0500)
committerJ. Bruce Fields <bfields@redhat.com>
Tue, 1 Mar 2016 21:06:38 +0000 (13:06 -0800)
Fix several issues with svc_rdma_send_error():

 - Post a receive buffer to replace the one that was consumed by
   the incoming request
 - Posting a send should use DMA_TO_DEVICE, not DMA_FROM_DEVICE
 - No need to put_page _and_ free pages in svc_rdma_put_context
 - Make sure the sge is set up completely in case the error
   path goes through svc_rdma_unmap_dma()
 - Replace the use of ENOSYS, which has a reserved meaning

Related fixes in svc_rdma_recvfrom():

 - Don't leak the ctxt associated with the incoming request
 - Don't close the connection after sending an error reply
 - Let svc_rdma_send_error() figure out the right header error code

As a final cleanup, move svc_rdma_send_error() to svc_rdma_sendto.c
with other similar functions. There is some common logic in these
functions that could someday be combined to reduce code duplication.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Devesh Sharma <devesh.sharma@broadcom.com>
Tested-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_marshal.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c

index aef47dd..42e8522 100644 (file)
@@ -228,11 +228,11 @@ extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern struct rpcrdma_read_chunk *
        svc_rdma_get_read_chunk(struct rpcrdma_msg *);
+extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
+                               int);
 
 /* svc_rdma_transport.c */
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
-                               enum rpcrdma_errcode);
 extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
 extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
 extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
index e2fca76..f74fc52 100644 (file)
@@ -162,7 +162,7 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
        }
 
        if (rmsgp->rm_vers != rpcrdma_version)
-               return -ENOSYS;
+               return -EPROTONOSUPPORT;
 
        /* Pull in the extra for the padded case and bump our pointer */
        if (rmsgp->rm_type == rdma_msgp) {
index acf15b8..0f09052 100644 (file)
@@ -612,7 +612,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct rpcrdma_msg *rmsgp;
        int ret = 0;
-       int len;
 
        dprintk("svcrdma: rqstp=%p\n", rqstp);
 
@@ -654,15 +653,10 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
 
        /* Decode the RDMA header. */
-       len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
-       rqstp->rq_xprt_hlen = len;
-
-       /* If the request is invalid, reply with an error */
-       if (len < 0) {
-               if (len == -ENOSYS)
-                       svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
-               goto close_out;
-       }
+       ret = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
+       if (ret < 0)
+               goto out_err;
+       rqstp->rq_xprt_hlen = ret;
 
        if (svc_rdma_is_backchannel_reply(xprt, rmsgp)) {
                ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, rmsgp,
@@ -698,6 +692,11 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
        svc_xprt_copy_addrs(rqstp, xprt);
        return ret;
 
+out_err:
+       svc_rdma_send_error(rdma_xprt, rmsgp, ret);
+       svc_rdma_put_context(ctxt, 0);
+       return 0;
+
  close_out:
        if (ctxt)
                svc_rdma_put_context(ctxt, 1);
index ace9efa..a26ca56 100644 (file)
@@ -652,3 +652,65 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        return -ENOTCONN;
 }
+
+void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
+                        int status)
+{
+       struct ib_send_wr err_wr;
+       struct page *p;
+       struct svc_rdma_op_ctxt *ctxt;
+       enum rpcrdma_errcode err;
+       __be32 *va;
+       int length;
+       int ret;
+
+       ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
+       if (ret)
+               return;
+
+       p = alloc_page(GFP_KERNEL);
+       if (!p)
+               return;
+       va = page_address(p);
+
+       /* XDR encode an error reply */
+       err = ERR_CHUNK;
+       if (status == -EPROTONOSUPPORT)
+               err = ERR_VERS;
+       length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
+
+       ctxt = svc_rdma_get_context(xprt);
+       ctxt->direction = DMA_TO_DEVICE;
+       ctxt->count = 1;
+       ctxt->pages[0] = p;
+
+       /* Prepare SGE for local address */
+       ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
+       ctxt->sge[0].length = length;
+       ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
+                                           p, 0, length, DMA_TO_DEVICE);
+       if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
+               dprintk("svcrdma: Error mapping buffer for protocol error\n");
+               svc_rdma_put_context(ctxt, 1);
+               return;
+       }
+       atomic_inc(&xprt->sc_dma_used);
+
+       /* Prepare SEND WR */
+       memset(&err_wr, 0, sizeof(err_wr));
+       ctxt->wr_op = IB_WR_SEND;
+       err_wr.wr_id = (unsigned long)ctxt;
+       err_wr.sg_list = ctxt->sge;
+       err_wr.num_sge = 1;
+       err_wr.opcode = IB_WR_SEND;
+       err_wr.send_flags = IB_SEND_SIGNALED;
+
+       /* Post It */
+       ret = svc_rdma_send(xprt, &err_wr);
+       if (ret) {
+               dprintk("svcrdma: Error %d posting send for protocol error\n",
+                       ret);
+               svc_rdma_unmap_dma(ctxt);
+               svc_rdma_put_context(ctxt, 1);
+       }
+}
index 03fdfce..15c8fa3 100644 (file)
@@ -1433,57 +1433,3 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
        }
        return ret;
 }
-
-void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
-                        enum rpcrdma_errcode err)
-{
-       struct ib_send_wr err_wr;
-       struct page *p;
-       struct svc_rdma_op_ctxt *ctxt;
-       __be32 *va;
-       int length;
-       int ret;
-
-       p = alloc_page(GFP_KERNEL);
-       if (!p)
-               return;
-       va = page_address(p);
-
-       /* XDR encode error */
-       length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
-
-       ctxt = svc_rdma_get_context(xprt);
-       ctxt->direction = DMA_FROM_DEVICE;
-       ctxt->count = 1;
-       ctxt->pages[0] = p;
-
-       /* Prepare SGE for local address */
-       ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
-                                           p, 0, length, DMA_FROM_DEVICE);
-       if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
-               put_page(p);
-               svc_rdma_put_context(ctxt, 1);
-               return;
-       }
-       atomic_inc(&xprt->sc_dma_used);
-       ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
-       ctxt->sge[0].length = length;
-
-       /* Prepare SEND WR */
-       memset(&err_wr, 0, sizeof err_wr);
-       ctxt->wr_op = IB_WR_SEND;
-       err_wr.wr_id = (unsigned long)ctxt;
-       err_wr.sg_list = ctxt->sge;
-       err_wr.num_sge = 1;
-       err_wr.opcode = IB_WR_SEND;
-       err_wr.send_flags = IB_SEND_SIGNALED;
-
-       /* Post It */
-       ret = svc_rdma_send(xprt, &err_wr);
-       if (ret) {
-               dprintk("svcrdma: Error %d posting send for protocol error\n",
-                       ret);
-               svc_rdma_unmap_dma(ctxt);
-               svc_rdma_put_context(ctxt, 1);
-       }
-}