Merge branch 'for-linus-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/mason...
[cascardo/linux.git] net/sunrpc/xprtrdma/fmr_ops.c
index 6326ebe..21cb3b1 100644
  * verb (fmr_op_unmap).
  */
 
-/* Transport recovery
- *
- * After a transport reconnect, fmr_op_map re-uses the MR already
- * allocated for the RPC, but generates a fresh rkey then maps the
- * MR again. This process is synchronous.
- */
-
 #include "xprt_rdma.h"
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY       RPCDBG_TRANS
 #endif
 
 /* Maximum scatter/gather per FMR */
 #define RPCRDMA_MAX_FMR_SGES   (64)
 
-static struct workqueue_struct *fmr_recovery_wq;
-
-#define FMR_RECOVERY_WQ_FLAGS          (WQ_UNBOUND)
+/* Access mode of externally registered pages */
+enum {
+       RPCRDMA_FMR_ACCESS_FLAGS        = IB_ACCESS_REMOTE_WRITE |
+                                         IB_ACCESS_REMOTE_READ,
+};
 
-int
-fmr_alloc_recovery_wq(void)
+bool
+fmr_is_supported(struct rpcrdma_ia *ia)
 {
-       fmr_recovery_wq = alloc_workqueue("fmr_recovery", WQ_UNBOUND, 0);
-       return !fmr_recovery_wq ? -ENOMEM : 0;
+       if (!ia->ri_device->alloc_fmr) {
+               pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
+                       ia->ri_device->name);
+               return false;
+       }
+       return true;
 }
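
fmr_is_supported() becomes the gate for choosing this registration mode: it probes only whether the ib_device provides the alloc_fmr verb. A minimal caller sketch, assuming the transport records its choice in ia->ri_ops (example_choose_fmr is a hypothetical name, not part of this patch):

/* Hypothetical sketch only: select the FMR ops table during
 * transport setup if the device supports it.
 */
static int example_choose_fmr(struct rpcrdma_ia *ia)
{
        if (!fmr_is_supported(ia))
                return -EINVAL;         /* fall back to another mode */
        ia->ri_ops = &rpcrdma_fmr_memreg_ops;
        return 0;
}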
 
-void
-fmr_destroy_recovery_wq(void)
+static int
+fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
 {
-       struct workqueue_struct *wq;
+       static struct ib_fmr_attr fmr_attr = {
+               .max_pages      = RPCRDMA_MAX_FMR_SGES,
+               .max_maps       = 1,
+               .page_shift     = PAGE_SHIFT
+       };
 
-       if (!fmr_recovery_wq)
-               return;
+       mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
+                                      sizeof(u64), GFP_KERNEL);
+       if (!mw->fmr.fm_physaddrs)
+               goto out_free;
 
-       wq = fmr_recovery_wq;
-       fmr_recovery_wq = NULL;
-       destroy_workqueue(wq);
+       mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
+                           sizeof(*mw->mw_sg), GFP_KERNEL);
+       if (!mw->mw_sg)
+               goto out_free;
+
+       sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);
+
+       mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
+                                    &fmr_attr);
+       if (IS_ERR(mw->fmr.fm_mr))
+               goto out_fmr_err;
+
+       return 0;
+
+out_fmr_err:
+       dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
+               PTR_ERR(mw->fmr.fm_mr));
+
+out_free:
+       kfree(mw->mw_sg);
+       kfree(mw->fmr.fm_physaddrs);
+       return -ENOMEM;
 }
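
Per-MR setup now lives behind the ro_init_mr hook instead of the old one-shot fmr_op_init(), so the generic buffer code can grow the MW pool one MR at a time. A hedged sketch of how a caller might consume the hook (the allocator below is illustrative; the real one lives in verbs.c):

/* Illustrative allocator: create one MW through ro_init_mr.
 * example_alloc_mw is a hypothetical name.
 */
static struct rpcrdma_mw *example_alloc_mw(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mw *mw;

        mw = kzalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return NULL;
        if (ia->ri_ops->ro_init_mr(ia, mw)) {
                kfree(mw);
                return NULL;
        }
        mw->mw_xprt = r_xprt;
        return mw;
}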
 
 static int
 __fmr_unmap(struct rpcrdma_mw *mw)
 {
        LIST_HEAD(l);
+       int rc;
 
-       list_add(&mw->fmr.fmr->list, &l);
-       return ib_unmap_fmr(&l);
+       list_add(&mw->fmr.fm_mr->list, &l);
+       rc = ib_unmap_fmr(&l);
+       list_del_init(&mw->fmr.fm_mr->list);
+       return rc;
 }
 
-/* Deferred reset of a single FMR. Generate a fresh rkey by
- * replacing the MR. There's no recovery if this fails.
- */
 static void
-__fmr_recovery_worker(struct work_struct *work)
+fmr_op_release_mr(struct rpcrdma_mw *r)
 {
-       struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
-                                           mw_work);
-       struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+       LIST_HEAD(unmap_list);
+       int rc;
 
-       __fmr_unmap(mw);
-       rpcrdma_put_mw(r_xprt, mw);
-       return;
+       /* Ensure MW is not on any rl_registered list */
+       if (!list_empty(&r->mw_list))
+               list_del(&r->mw_list);
+
+       kfree(r->fmr.fm_physaddrs);
+       kfree(r->mw_sg);
+
+       /* In case this one was left mapped, try to unmap it
+        * to prevent dealloc_fmr from failing with EBUSY
+        */
+       rc = __fmr_unmap(r);
+       if (rc)
+               pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
+                      r, rc);
+
+       rc = ib_dealloc_fmr(r->fmr.fm_mr);
+       if (rc)
+               pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
+                      r, rc);
+
+       kfree(r);
 }
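
Note the ordering in the release path: ib_dealloc_fmr() fails with -EBUSY while the FMR is still mapped, hence the defensive __fmr_unmap() first. A sketch of pool teardown through the new hook (the loop shape is assumed, not quoted from verbs.c):

/* Assumed teardown loop: drain the all-MWs list and release each
 * MR via ro_release_mr.
 */
static void example_destroy_mrs(struct rpcrdma_buffer *buf,
                                struct rpcrdma_ia *ia)
{
        struct rpcrdma_mw *mw;

        while (!list_empty(&buf->rb_all)) {
                mw = list_first_entry(&buf->rb_all,
                                      struct rpcrdma_mw, mw_all);
                list_del(&mw->mw_all);
                ia->ri_ops->ro_release_mr(mw);
        }
}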
 
-/* A broken MR was discovered in a context that can't sleep.
- * Defer recovery to the recovery worker.
+/* Reset of a single FMR.
  */
 static void
-__fmr_queue_recovery(struct rpcrdma_mw *mw)
+fmr_op_recover_mr(struct rpcrdma_mw *mw)
 {
-       INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
-       queue_work(fmr_recovery_wq, &mw->mw_work);
+       struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+       int rc;
+
+       /* ORDER: invalidate first */
+       rc = __fmr_unmap(mw);
+
+       /* ORDER: then DMA unmap */
+       ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+                       mw->mw_sg, mw->mw_nents, mw->mw_dir);
+       if (rc)
+               goto out_release;
+
+       rpcrdma_put_mw(r_xprt, mw);
+       r_xprt->rx_stats.mrs_recovered++;
+       return;
+
+out_release:
+       pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
+       r_xprt->rx_stats.mrs_orphaned++;
+
+       spin_lock(&r_xprt->rx_buf.rb_mwlock);
+       list_del(&mw->mw_all);
+       spin_unlock(&r_xprt->rx_buf.rb_mwlock);
+
+       fmr_op_release_mr(mw);
 }
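
fmr_op_recover_mr() is invoked directly in the synchronous case (see fmr_op_unmap_safe below) and from deferred work when the caller cannot sleep; rpcrdma_defer_mr_recovery() does the scheduling. Roughly how the deferred consumer might look, assuming a buffer-level list of stale MWs drained by a worker (the field and helper names here are assumptions):

/* Approximate worker shape; the real code also serializes the
 * stale-MR list with a spinlock.
 */
static void example_drain_stale_mrs(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_xprt *r_xprt =
                container_of(buf, struct rpcrdma_xprt, rx_buf);
        struct rpcrdma_mw *mw;

        while (!list_empty(&buf->rb_stale_mrs)) {
                mw = list_first_entry(&buf->rb_stale_mrs,
                                      struct rpcrdma_mw, mw_list);
                list_del_init(&mw->mw_list);
                r_xprt->rx_ia.ri_ops->ro_recover_mr(mw);
        }
}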
 
 static int
@@ -112,86 +175,21 @@ fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
                     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
 }
 
-static int
-fmr_op_init(struct rpcrdma_xprt *r_xprt)
-{
-       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-       int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
-       struct ib_fmr_attr fmr_attr = {
-               .max_pages      = RPCRDMA_MAX_FMR_SGES,
-               .max_maps       = 1,
-               .page_shift     = PAGE_SHIFT
-       };
-       struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
-       struct rpcrdma_mw *r;
-       int i, rc;
-
-       spin_lock_init(&buf->rb_mwlock);
-       INIT_LIST_HEAD(&buf->rb_mws);
-       INIT_LIST_HEAD(&buf->rb_all);
-
-       i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
-       i += 2;                         /* head + tail */
-       i *= buf->rb_max_requests;      /* one set for each RPC slot */
-       dprintk("RPC:       %s: initalizing %d FMRs\n", __func__, i);
-
-       rc = -ENOMEM;
-       while (i--) {
-               r = kzalloc(sizeof(*r), GFP_KERNEL);
-               if (!r)
-                       goto out;
-
-               r->fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
-                                          sizeof(u64), GFP_KERNEL);
-               if (!r->fmr.physaddrs)
-                       goto out_free;
-
-               r->fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
-               if (IS_ERR(r->fmr.fmr))
-                       goto out_fmr_err;
-
-               r->mw_xprt = r_xprt;
-               list_add(&r->mw_list, &buf->rb_mws);
-               list_add(&r->mw_all, &buf->rb_all);
-       }
-       return 0;
-
-out_fmr_err:
-       rc = PTR_ERR(r->fmr.fmr);
-       dprintk("RPC:       %s: ib_alloc_fmr status %i\n", __func__, rc);
-       kfree(r->fmr.physaddrs);
-out_free:
-       kfree(r);
-out:
-       return rc;
-}
-
 /* Use the ib_map_phys_fmr() verb to register a memory region
  * for remote access via RDMA READ or RDMA WRITE.
  */
 static int
 fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
-          int nsegs, bool writing)
+          int nsegs, bool writing, struct rpcrdma_mw **out)
 {
-       struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-       struct ib_device *device = ia->ri_device;
-       enum dma_data_direction direction = rpcrdma_data_dir(writing);
        struct rpcrdma_mr_seg *seg1 = seg;
        int len, pageoff, i, rc;
        struct rpcrdma_mw *mw;
+       u64 *dma_pages;
 
-       mw = seg1->rl_mw;
-       seg1->rl_mw = NULL;
-       if (!mw) {
-               mw = rpcrdma_get_mw(r_xprt);
-               if (!mw)
-                       return -ENOMEM;
-       } else {
-               /* this is a retransmit; generate a fresh rkey */
-               rc = __fmr_unmap(mw);
-               if (rc)
-                       return rc;
-       }
+       mw = rpcrdma_get_mw(r_xprt);
+       if (!mw)
+               return -ENOBUFS;
 
        pageoff = offset_in_page(seg1->mr_offset);
        seg1->mr_offset -= pageoff;     /* start of page */
@@ -200,8 +198,14 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
        if (nsegs > RPCRDMA_MAX_FMR_SGES)
                nsegs = RPCRDMA_MAX_FMR_SGES;
        for (i = 0; i < nsegs;) {
-               rpcrdma_map_one(device, seg, direction);
-               mw->fmr.physaddrs[i] = seg->mr_dma;
+               if (seg->mr_page)
+                       sg_set_page(&mw->mw_sg[i],
+                                   seg->mr_page,
+                                   seg->mr_len,
+                                   offset_in_page(seg->mr_offset));
+               else
+                       sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
+                                  seg->mr_len);
                len += seg->mr_len;
                ++seg;
                ++i;
@@ -210,49 +214,54 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
-
-       rc = ib_map_phys_fmr(mw->fmr.fmr, mw->fmr.physaddrs,
-                            i, seg1->mr_dma);
+       mw->mw_nents = i;
+       mw->mw_dir = rpcrdma_data_dir(writing);
+       if (i == 0)
+               goto out_dmamap_err;
+
+       if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
+                          mw->mw_sg, mw->mw_nents, mw->mw_dir))
+               goto out_dmamap_err;
+
+       for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
+               dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
+       rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
+                            dma_pages[0]);
        if (rc)
                goto out_maperr;
 
-       seg1->rl_mw = mw;
-       seg1->mr_rkey = mw->fmr.fmr->rkey;
-       seg1->mr_base = seg1->mr_dma + pageoff;
-       seg1->mr_nsegs = i;
-       seg1->mr_len = len;
-       return i;
+       mw->mw_handle = mw->fmr.fm_mr->rkey;
+       mw->mw_length = len;
+       mw->mw_offset = dma_pages[0] + pageoff;
 
-out_maperr:
-       dprintk("RPC:       %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
-               __func__, len, (unsigned long long)seg1->mr_dma,
-               pageoff, i, rc);
-       while (i--)
-               rpcrdma_unmap_one(device, --seg);
-       return rc;
-}
+       *out = mw;
+       return mw->mw_nents;
 
-static void
-__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
-{
-       struct ib_device *device = r_xprt->rx_ia.ri_device;
-       int nsegs = seg->mr_nsegs;
+out_dmamap_err:
+       pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
+              mw->mw_sg, mw->mw_nents);
+       rpcrdma_defer_mr_recovery(mw);
+       return -EIO;
 
-       while (nsegs--)
-               rpcrdma_unmap_one(device, seg++);
+out_maperr:
+       pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
+              len, (unsigned long long)dma_pages[0],
+              pageoff, mw->mw_nents, rc);
+       rpcrdma_defer_mr_recovery(mw);
+       return -EIO;
 }
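
For reference, the mapping loop above handles two segment flavors: whole pages (sg_set_page) and kernel-virtual fragments such as RPC header or tail buffers (sg_set_buf). A tiny standalone illustration of the two calls (names and sizes are arbitrary):

/* Standalone illustration of the two scatterlist cases used in
 * fmr_op_map(); not part of the patch.
 */
static void example_fill_sg(struct scatterlist *sg,
                            struct page *page, void *kbuf, size_t len)
{
        sg_init_table(sg, 2);
        sg_set_page(&sg[0], page, PAGE_SIZE, 0);        /* full page */
        sg_set_buf(&sg[1], kbuf, len);                  /* buffer fragment */
}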
 
 /* Invalidate all memory regions that were registered for "req".
  *
  * Sleeps until it is safe for the host CPU to access the
  * previously mapped memory regions.
+ *
+ * Caller ensures that req->rl_registered is not empty.
  */
 static void
 fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
-       struct rpcrdma_mr_seg *seg;
-       unsigned int i, nchunks;
-       struct rpcrdma_mw *mw;
+       struct rpcrdma_mw *mw, *tmp;
        LIST_HEAD(unmap_list);
        int rc;
 
@@ -261,90 +270,54 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
        /* ORDER: Invalidate all of the req's MRs first
         *
         * ib_unmap_fmr() is slow, so use a single call instead
-        * of one call per mapped MR.
+        * of one call per mapped FMR.
         */
-       for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
-               seg = &req->rl_segments[i];
-               mw = seg->rl_mw;
-
-               list_add(&mw->fmr.fmr->list, &unmap_list);
-
-               i += seg->mr_nsegs;
-       }
+       list_for_each_entry(mw, &req->rl_registered, mw_list)
+               list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
        rc = ib_unmap_fmr(&unmap_list);
        if (rc)
-               pr_warn("%s: ib_unmap_fmr failed (%i)\n", __func__, rc);
+               goto out_reset;
 
        /* ORDER: Now DMA unmap all of the req's MRs, and return
         * them to the free MW list.
         */
-       for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
-               seg = &req->rl_segments[i];
+       list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
+               list_del_init(&mw->mw_list);
+               list_del_init(&mw->fmr.fm_mr->list);
+               ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+                               mw->mw_sg, mw->mw_nents, mw->mw_dir);
+               rpcrdma_put_mw(r_xprt, mw);
+       }
 
-               __fmr_dma_unmap(r_xprt, seg);
-               rpcrdma_put_mw(r_xprt, seg->rl_mw);
+       return;
 
-               i += seg->mr_nsegs;
-               seg->mr_nsegs = 0;
-               seg->rl_mw = NULL;
-       }
+out_reset:
+       pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
 
-       req->rl_nchunks = 0;
+       list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
+               list_del_init(&mw->fmr.fm_mr->list);
+               fmr_op_recover_mr(mw);
+       }
 }
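
The single ib_unmap_fmr() call works because each struct ib_fmr embeds a list head for exactly this kind of batching. A self-contained sketch of the idiom (the array-based wrapper is hypothetical):

/* Batching sketch: invalidate several mapped FMRs with one verb
 * call, then detach them from the temporary list.
 */
static int example_unmap_batch(struct ib_fmr **fmrs, int count)
{
        LIST_HEAD(l);
        int i, rc;

        for (i = 0; i < count; i++)
                list_add_tail(&fmrs[i]->list, &l);
        rc = ib_unmap_fmr(&l);
        for (i = 0; i < count; i++)
                list_del_init(&fmrs[i]->list);
        return rc;
}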
 
 /* Use a slow, safe mechanism to invalidate all memory regions
  * that were registered for "req".
- *
- * In the asynchronous case, DMA unmapping occurs first here
- * because the rpcrdma_mr_seg is released immediately after this
- * call. It's contents won't be available in __fmr_dma_unmap later.
- * FIXME.
  */
 static void
 fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
                  bool sync)
 {
-       struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mw *mw;
-       unsigned int i;
-
-       for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
-               seg = &req->rl_segments[i];
-               mw = seg->rl_mw;
-
-               if (sync) {
-                       /* ORDER */
-                       __fmr_unmap(mw);
-                       __fmr_dma_unmap(r_xprt, seg);
-                       rpcrdma_put_mw(r_xprt, mw);
-               } else {
-                       __fmr_dma_unmap(r_xprt, seg);
-                       __fmr_queue_recovery(mw);
-               }
-
-               i += seg->mr_nsegs;
-               seg->mr_nsegs = 0;
-               seg->rl_mw = NULL;
-       }
-}
-
-static void
-fmr_op_destroy(struct rpcrdma_buffer *buf)
-{
-       struct rpcrdma_mw *r;
-       int rc;
-
-       while (!list_empty(&buf->rb_all)) {
-               r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
-               list_del(&r->mw_all);
-               kfree(r->fmr.physaddrs);
 
-               rc = ib_dealloc_fmr(r->fmr.fmr);
-               if (rc)
-                       dprintk("RPC:       %s: ib_dealloc_fmr failed %i\n",
-                               __func__, rc);
+       while (!list_empty(&req->rl_registered)) {
+               mw = list_first_entry(&req->rl_registered,
+                                     struct rpcrdma_mw, mw_list);
+               list_del_init(&mw->mw_list);
 
-               kfree(r);
+               if (sync)
+                       fmr_op_recover_mr(mw);
+               else
+                       rpcrdma_defer_mr_recovery(mw);
        }
 }
 
@@ -352,9 +325,10 @@ const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
        .ro_map                         = fmr_op_map,
        .ro_unmap_sync                  = fmr_op_unmap_sync,
        .ro_unmap_safe                  = fmr_op_unmap_safe,
+       .ro_recover_mr                  = fmr_op_recover_mr,
        .ro_open                        = fmr_op_open,
        .ro_maxpages                    = fmr_op_maxpages,
-       .ro_init                        = fmr_op_init,
-       .ro_destroy                     = fmr_op_destroy,
+       .ro_init_mr                     = fmr_op_init_mr,
+       .ro_release_mr                  = fmr_op_release_mr,
        .ro_displayname                 = "fmr",
 };
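
Callers reach these entry points through the transport's ri_ops pointer, which keeps the fmr and frwr modes interchangeable. A minimal dispatch sketch (the wrapper name is hypothetical; the ro_map signature matches this patch):

/* Hypothetical wrapper: how generic code invokes ro_map without
 * knowing which registration mode is active.
 */
static int example_ro_map(struct rpcrdma_xprt *r_xprt,
                          struct rpcrdma_mr_seg *seg, int nsegs,
                          bool writing, struct rpcrdma_mw **out)
{
        return r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
                                            writing, out);
}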