/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;		/* must be last */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}
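
/*
 * For orientation: the analogous container for plain MRs, struct qib_mr,
 * and its to_imr() helper used below live in qib_verbs.h. A sketch of the
 * layout per the upstream header (the trailing qib_mregion must likewise
 * be last, since its map[] array is sized at allocation time):
 *
 *	struct qib_mr {
 *		struct ib_mr ibmr;
 *		struct ib_umem *umem;
 *		u64 *pages;		(page array for IB_WR_REG_MR)
 *		u32 npages;
 *		struct qib_mregion mr;	(must be last)
 *	};
 */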

static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
			    int count)
{
	int m, i;

	/* One first-level map entry covers QIB_SEGSZ segments. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	for (i = 0; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = m;
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;

bail:
	while (i)
		kfree(mr->map[--i]);
	return -ENOMEM;
}
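
/*
 * Illustrative sketch only (hypothetical helper, not part of the upstream
 * driver): how a linear segment index maps onto the two-level map[] table
 * sized in init_qib_mregion() above.
 */
static inline void qib_example_seg_offsets(int idx, int *m, int *n)
{
	*m = idx / QIB_SEGSZ;	/* first-level map entry */
	*n = idx % QIB_SEGSZ;	/* segment slot within that entry */
}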

static void deinit_qib_mregion(struct qib_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_mr *mr = NULL;
	struct ib_mr *ret;
	int rval;

	/* The DMA MR bypasses translation; never hand it to userspace PDs. */
	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = init_qib_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = qib_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}
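
/*
 * Usage sketch (hypothetical kernel caller, not part of this file):
 *
 *	struct ib_mr *mr = qib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * In practice the core verbs layer reaches this function through the
 * device's get_dma_mr method rather than calling it directly.
 */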

static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{
	struct qib_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = init_qib_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;

	rval = qib_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from the HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Pin the user pages and build a scatterlist for them. */
	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = umem->nmap;

	mr = alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}
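
/*
 * Usage sketch (userspace side, via libibverbs; buf/len hypothetical):
 *
 *	struct ibv_mr *umr = ibv_reg_mr(pd, buf, len,
 *					IBV_ACCESS_LOCAL_WRITE |
 *					IBV_ACCESS_REMOTE_WRITE);
 *
 * The uverbs layer turns that call into qib_reg_user_mr() above.
 */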

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	int ret = 0;
	unsigned long timeout;

	kfree(mr->pages);
	qib_free_lkey(&mr->mr);

	qib_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
	if (!timeout) {
		/* Timed out: take the reference back and report busy. */
		qib_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/*
 * Allocate a memory region usable with the
 * IB_WR_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct qib_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages)
		goto err;

	return &mr->ibmr;

err:
	qib_dereg_mr(&mr->ibmr);
	return ERR_PTR(-ENOMEM);
}

static int qib_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct qib_mr *mr = to_imr(ibmr);

	if (unlikely(mr->npages == mr->mr.max_segs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}

int qib_map_mr_sg(struct ib_mr *ibmr,
		  struct scatterlist *sg,
		  int sg_nents)
{
	struct qib_mr *mr = to_imr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page);
}
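
/*
 * Usage sketch (kernel ULP side; qp/sgl/nents hypothetical): a consumer
 * maps its scatterlist into the MR and then posts an IB_WR_REG_MR work
 * request, roughly:
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_REMOTE_WRITE;
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 *
 * ib_map_mr_sg() dispatches back into qib_map_mr_sg() above.
 */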

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = qib_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}

/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	/* Don't replace the mapping while it may still be in use. */
	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}

	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
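
/*
 * Usage sketch (hypothetical consumer, through the core verbs wrappers):
 *
 *	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	...
 *	ret = ib_unmap_fmr(&fmr_list);
 *
 * ib_map_phys_fmr() dispatches to qib_map_phys_fmr() above, and
 * ib_unmap_fmr() to qib_unmap_fmr() below.
 */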

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	qib_free_lkey(&fmr->mr);
	qib_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
	if (!timeout) {
		/* Timed out: take the reference back and report busy. */
		qib_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}

void mr_rcu_callback(struct rcu_head *list)
{
	struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

	complete(&mr->comp);
}
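
/*
 * For orientation (sketch of the counterpart in qib_verbs.h, per the
 * upstream header): the last qib_put_mr() schedules this callback via
 * RCU, which then wakes the waiter in qib_dereg_mr()/qib_dealloc_fmr():
 *
 *	static inline void qib_put_mr(struct qib_mregion *mr)
 *	{
 *		if (unlikely(atomic_dec_and_test(&mr->refcount)))
 *			call_rcu(&mr->list, mr_rcu_callback);
 *	}
 */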