/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Sometimes referred to as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 *
 * After a transport reconnect, fmr_op_map re-uses the MR already
 * allocated for the RPC, but generates a fresh rkey then maps the
 * MR again. This process is synchronous.
 */
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
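/* Editor's illustration, not called by anything in this file: a minimal
 * sketch of the raw FMR verb life cycle that the fmr_op_* methods below
 * wrap. The function name and parameters are hypothetical; only the
 * ib_alloc_fmr / ib_map_phys_fmr / ib_unmap_fmr / ib_dealloc_fmr calls
 * and the struct ib_fmr_attr fields reflect the kernel API as described
 * in the header comment above.
 */
static int __maybe_unused
fmr_lifecycle_sketch(struct ib_pd *pd, u64 *pages, int npages, u64 iova)
{
	struct ib_fmr_attr attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT,
	};
	LIST_HEAD(fmr_list);
	struct ib_fmr *fmr;
	int rc;

	/* Slow, sleepable setup: allocate the FMR once. */
	fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
			   &attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* Fast path: map a physical page list. The resulting fmr->rkey
	 * and iova are what the peer uses for RDMA READ or WRITE.
	 */
	rc = ib_map_phys_fmr(fmr, pages, npages, iova);
	if (rc)
		goto out_dealloc;

	/* Slow path: invalidate the mapping. ib_unmap_fmr() takes a
	 * list head so that many FMRs can be invalidated in one call.
	 */
	list_add(&fmr->list, &fmr_list);
	rc = ib_unmap_fmr(&fmr_list);

out_dealloc:
	ib_dealloc_fmr(fmr);
	return rc;
}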
static struct workqueue_struct *fmr_recovery_wq;

#define FMR_RECOVERY_WQ_FLAGS	(WQ_UNBOUND)

fmr_alloc_recovery_wq(void)
	fmr_recovery_wq = alloc_workqueue("fmr_recovery",
					  FMR_RECOVERY_WQ_FLAGS, 0);
	return !fmr_recovery_wq ? -ENOMEM : 0;

fmr_destroy_recovery_wq(void)
	struct workqueue_struct *wq;

	wq = fmr_recovery_wq;
	fmr_recovery_wq = NULL;
	destroy_workqueue(wq);
__fmr_unmap(struct rpcrdma_mw *mw)
	LIST_HEAD(l);

	list_add(&mw->fmr.fmr->list, &l);
	return ib_unmap_fmr(&l);
/* Deferred reset of a single FMR. Generate a fresh rkey by
 * replacing the MR. There's no recovery if this fails.
 */
__fmr_recovery_worker(struct work_struct *work)
	struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
					     mw_work);
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;

	__fmr_unmap(mw);
	rpcrdma_put_mw(r_xprt, mw);
/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
__fmr_queue_recovery(struct rpcrdma_mw *mw)
	INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
	queue_work(fmr_recovery_wq, &mw->mw_work);
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      RPCRDMA_MAX_FMR_SGES));
/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
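/* Worked example: RPCRDMA_MAX_FMR_SGES is 64, so with 4KB pages (an
 * assumption; PAGE_SIZE varies by architecture) a single chunk segment
 * can carry up to 64 * 4KB = 256KB of payload. The min_t() above then
 * limits an RPC to RPCRDMA_MAX_DATA_SEGS pages in total, or to
 * RPCRDMA_MAX_HDR_SEGS such segments, whichever is smaller.
 */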
fmr_op_init(struct rpcrdma_xprt *r_xprt)
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.page_shift	= PAGE_SHIFT
	};
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int i, rc;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FMRs\n", __func__, i);
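	/* Worked example of the sizing above: when RPCRDMA_MAX_DATA_SEGS
	 * is no larger than RPCRDMA_MAX_FMR_SGES, the quotient clamps to
	 * 1; adding head and tail gives 3 MWs per RPC slot. With, say,
	 * 32 slots (the value of rb_max_requests is an assumption here),
	 * 96 FMRs are allocated for the pool.
	 */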
	r = kzalloc(sizeof(*r), GFP_KERNEL);

	r->fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
				   sizeof(u64), GFP_KERNEL);
	if (!r->fmr.physaddrs)

	r->fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
	if (IS_ERR(r->fmr.fmr))

	list_add(&r->mw_list, &buf->rb_mws);
	list_add(&r->mw_all, &buf->rb_all);

	rc = PTR_ERR(r->fmr.fmr);
	dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc);
	kfree(r->fmr.physaddrs);
/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;

	mw = rpcrdma_get_mw(r_xprt);

	/* this is a retransmit; generate a fresh rkey */
	rc = __fmr_unmap(mw);

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;

	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		rpcrdma_map_one(device, seg, direction);
		mw->fmr.physaddrs[i] = seg->mr_dma;
		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
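	/* On the "holes" check above: an FMR presents its pages as one
	 * virtually contiguous region. If a segment ends in the middle
	 * of a page, or the next segment does not start on a page
	 * boundary, the remaining segments cannot be folded into this
	 * MR; the loop stops, the segments gathered so far become one
	 * chunk, and the caller registers the rest with another MR.
	 * For example, a 2.5-page segment followed by another segment
	 * would leave an unmappable gap after the half page.
	 */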
	rc = ib_map_phys_fmr(mw->fmr.fmr, mw->fmr.physaddrs,
			     i, seg1->mr_dma);

	seg1->mr_rkey = mw->fmr.fmr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;

	dprintk("RPC: %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
		__func__, len, (unsigned long long)seg1->mr_dma,
		pageoff, i, rc);
	rpcrdma_unmap_one(device, --seg);
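	/* Error path note: the DMA mappings set up in the loop above are
	 * torn down again (note the reverse walk via --seg) before the
	 * failure is returned to the caller.
	 */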
__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	int nsegs = seg->mr_nsegs;

	while (nsegs--)
		rpcrdma_unmap_one(device, seg++);
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped MR.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_add(&mw->fmr.fmr->list, &unmap_list);

		i += seg->mr_nsegs;
	}

	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		pr_warn("%s: ib_unmap_fmr failed (%i)\n", __func__, rc);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		__fmr_dma_unmap(r_xprt, seg);
		rpcrdma_put_mw(r_xprt, seg->rl_mw);

		i += seg->mr_nsegs;
	}
/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 *
 * In the asynchronous case, DMA unmapping occurs first here
 * because the rpcrdma_mr_seg is released immediately after this
 * call. Its contents won't be available in __fmr_dma_unmap later.
 */
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync) {
			__fmr_dma_unmap(r_xprt, seg);
			rpcrdma_put_mw(r_xprt, mw);
		} else {
			__fmr_dma_unmap(r_xprt, seg);
			__fmr_queue_recovery(mw);
		}
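		/* The two branches above differ in who retires the MW:
		 * in the synchronous case it goes straight back to the
		 * free list via rpcrdma_put_mw(); in the asynchronous
		 * case it is handed to the recovery workqueue, and
		 * __fmr_recovery_worker() above invalidates it before
		 * returning it to the pool.
		 */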
fmr_op_destroy(struct rpcrdma_buffer *buf)
	struct rpcrdma_mw *r;
	int rc;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		kfree(r->fmr.physaddrs);

		rc = ib_dealloc_fmr(r->fmr.fmr);
		if (rc)
			dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
				__func__, rc);
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map			= fmr_op_map,
	.ro_unmap_sync		= fmr_op_unmap_sync,
	.ro_unmap_safe		= fmr_op_unmap_safe,
	.ro_open		= fmr_op_open,
	.ro_maxpages		= fmr_op_maxpages,
	.ro_init		= fmr_op_init,
	.ro_destroy		= fmr_op_destroy,
	.ro_displayname		= "fmr",
};
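/* For context, an editor's sketch of how this ops table is expected to
 * be selected; the switch lives in the transport setup code in verbs.c,
 * and the FRWR ops symbol and "memreg" variable named here are
 * assumptions for illustration only:
 *
 *	switch (memreg) {
 *	case RPCRDMA_FRMR:
 *		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *		break;
 *	case RPCRDMA_MTHCAFMR:
 *		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
 *		break;
 *	}
 */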