/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Sometimes referred to as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */
/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
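/* Illustrative sketch, not part of the original file: the bare verb
 * sequence that this file wraps. The locals (pd, pages, npages, fmr)
 * are hypothetical, and error handling is elided.
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages	= RPCRDMA_MAX_FMR_SGES,
 *		.max_maps	= 1,
 *		.page_shift	= PAGE_SHIFT,
 *	};
 *	u64 pages[RPCRDMA_MAX_FMR_SGES];
 *	LIST_HEAD(fmr_list);
 *	struct ib_fmr *fmr;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_READ |
 *			       IB_ACCESS_REMOTE_WRITE, &attr);
 *
 *	(fast) expose the page-aligned DMA addresses via fmr->rkey:
 *	ib_map_phys_fmr(fmr, pages, npages, pages[0]);
 *
 *	(slow, tens of usecs) synchronously invalidate the mapping:
 *	list_add(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *
 *	ib_dealloc_fmr(fmr);
 */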
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
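/* With 4KB pages, 64 scatter/gather entries let a single FMR cover
 * up to 64 * 4096 = 256KB of physically discontiguous payload.
 */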
/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}
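/* For illustration only (simplified from the transport's IA-open
 * logic, not verbatim): FMR is the fallback registration strategy
 * when the device does not support FRWR.
 *
 *	if (frwr_is_supported(ia))
 *		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *	else if (fmr_is_supported(ia))
 *		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
 *	else
 *		return -EINVAL;
 */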
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;
	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;
	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));
out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}
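/* Note on fmr_attr above: max_maps of 1 means the FMR must be
 * unmapped with ib_unmap_fmr() after each use before it can be
 * mapped again, so a mapping never outlives the RPC it served.
 */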
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del_init(&mw->fmr.fm_mr->list);
	return rc;
}
static void
fmr_op_release_mr(struct rpcrdma_mw *r)
{
	LIST_HEAD(unmap_list);
	int rc;

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(r);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       r, rc);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);

	kfree(r);
}
/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mw);

	/* ORDER: then DMA unmap */
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	fmr_op_release_mr(mw);
}
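/* A rough sketch (simplified from the recovery worker in verbs.c,
 * locking elided) of how a deferred MR reaches this function: a
 * workqueue worker drains the stale-MR list that
 * rpcrdma_defer_mr_recovery() feeds, invoking ro_recover_mr on each
 * MW.
 *
 *	while (!list_empty(&buf->rb_stale_mrs)) {
 *		mw = list_first_entry(&buf->rb_stale_mrs,
 *				      struct rpcrdma_mw, mw_list);
 *		list_del_init(&mw->mw_list);
 *		mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);
 *	}
 */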
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      RPCRDMA_MAX_FMR_SGES));
	return 0;
}
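/* Worked example, assuming 4KB pages and the 1MB maximum payload
 * defined in xprt_rdma.h: RPCRDMA_MAX_DATA_SEGS is then 257, so the
 * call above sizes headers for 257 / 64 = 4 chunk segments per RPC
 * (integer division; max_t() guards against a zero result).
 */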
/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
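/* Worked example, same assumptions as above: each header segment can
 * carry a full 64-page FMR, so RPCRDMA_MAX_HDR_SEGS * 64 comfortably
 * exceeds RPCRDMA_MAX_DATA_SEGS (257) and min_t() returns 257: the
 * payload ceiling, not header space, is the effective limit here.
 */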
/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;
	u64 *dma_pages;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	if (mw)
		rpcrdma_defer_mr_recovery(mw);
	mw = rpcrdma_get_mw(r_xprt);
	if (!mw)
		return -ENOBUFS;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);
	if (i == 0)
		goto out_dmamap_err;

	if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
			   mw->mw_sg, mw->mw_nents, mw->mw_dir))
		goto out_dmamap_err;

	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
		dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mw->fmr.fm_mr->rkey;
	seg1->mr_base = dma_pages[0] + pageoff;
	seg1->mr_nsegs = mw->mw_nents;
	seg1->mr_len = len;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mw->mw_nents, rc);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;
}
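/* Worked example of the "Check for holes" test in fmr_op_map, with
 * hypothetical numbers: if seg[0] ends exactly on a page boundary
 * but seg[1] starts at byte 512 of its page, then
 * offset_in_page(seg[1].mr_offset) == 512 and the loop breaks after
 * the first element. An FMR maps whole pages, so only the first
 * segment may start, and only the last may end, off a page boundary;
 * an interior hole would expose unrelated memory to the peer.
 */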
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);

		i += seg->mr_nsegs;
	}
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}

	req->rl_nchunks = 0;
	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		fmr_op_recover_mr(mw);

		i += seg->mr_nsegs;
	}
}
/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync)
			fmr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}
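/* Sketch of intended usage (simplified; the actual call sites live
 * in the transport code): sync is true when the caller can sleep, so
 * stale MRs are recovered inline; otherwise they are queued for the
 * recovery workqueue.
 *
 *	ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
 */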
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_unmap_safe			= fmr_op_unmap_safe,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
};
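/* Callers do not invoke these functions directly; they dispatch
 * through whichever ops vector the transport selected when the IA
 * was created, e.g. (hand-written illustration):
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 */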