/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};
#define MLX5_UMR_ALIGN 2048

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE / sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif
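/*
 * The MR cache keeps pre-created, UMR-enabled mkeys grouped by the order
 * (log2) of the number of pages they can map.  Entry i of cache->ent[]
 * holds mkeys of order ent[0].order + i, so e.g. if ent[0].order is 2,
 * an order-5 registration is served from index 3.
 */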
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}
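/*
 * Completion callback for the asynchronous CREATE_MKEY commands posted by
 * add_keys().  On failure the MR is freed and further cache filling is
 * delayed via dev->delay_timer; on success the mkey is composed from the
 * returned index and a per-device key counter, added to the cache entry's
 * free list and inserted into the mkey radix tree.
 */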
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	unsigned long flags;
	u8 key;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
				&mr->mmr);
	if (err)
		pr_err("Error inserting to mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}
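/*
 * Post up to @num asynchronous mkey-creation commands for cache entry @c,
 * keeping at most MAX_PENDING_REG_MR commands in flight per entry.  The
 * new mkeys are picked up in reg_mr_callback() above.
 */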
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = MLX5_MKEY_STATUS_FREE;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
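/*
 * debugfs knobs created by mlx5_mr_cache_debugfs_init(): "size" resizes a
 * cache entry to an absolute number of mkeys, "limit" sets the level the
 * background worker tries to keep available, and "cur"/"miss" expose the
 * entry's current count and cache-miss count.
 */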
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;
			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;
	return err;
}
static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}
static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;
	return err;
}
static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}
	return 0;
}
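/*
 * Background worker for a cache entry: tops the entry up one mkey at a
 * time while it is below 2 * limit (backing off with delayed work when
 * add_keys() returns -EAGAIN or an error), and lazily shrinks it when it
 * has been above 2 * limit with no cache activity for 300 seconds.
 */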
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		if (!someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}
static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
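/*
 * Take an MR from the cache, starting at the entry matching the requested
 * order and falling back to larger orders if that entry is empty.  Empty
 * entries, and the entry we allocate from once it drops below its limit,
 * get their worker queued to refill in the background; if nothing is
 * found, the requested entry's miss counter is bumped.
 */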
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
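/*
 * Return an MR to its cache entry.  If that pushes the entry above
 * 2 * limit, kick the worker so it can shrink the pool again.
 */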
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
	}

	return 0;
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}
static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);
	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);
	return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	return &mr->ibmr;

err_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
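/*
 * An mkey's translation table is sized in octowords, each of which holds
 * two 8-byte page entries.  For example (illustrative numbers only): addr =
 * 0x1000, len = 0x3000 and page_size = 0x1000 give offset 0 and npages 3,
 * hence (3 + 1) / 2 = 2 octowords.
 */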
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}
static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_mr *mr = dev->umrc.mr;
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = mr->lkey;

	wr->sg_list = sg;
	wr->num_sge = n ? 1 : 0;
	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}
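/*
 * Completion handler for the UMR QP's CQ: each completion carries the
 * address of an on-stack mlx5_ib_umr_context in wr_id, so record the
 * status and wake the thread sleeping in wait_for_completion().
 */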
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_umr_context *context;
	struct ib_wc wc;
	int err;

	while (1) {
		err = ib_poll_cq(cq, 1, &wc);
		if (err < 0) {
			pr_warn("poll cq error %d\n", err);
			return;
		}
		if (err == 0)
			break;

		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
		context->status = wc.status;
		complete(&context->done);
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
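/*
 * Register user memory through the UMR QP: take a pre-created mkey from
 * the cache, build a 2k-aligned MTT page list, DMA-map it and post a
 * MLX5_IB_WR_UMR work request, then sleep on umr_context.done until
 * mlx5_umr_cq_handler() reports the completion.
 */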
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	__be64 *pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more. */
	size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!mr_pas) {
		err = -ENOMEM;
		goto free_mr;
	}

	pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, size - npages * sizeof(u64));

	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		err = -ENOMEM;
		goto free_pas;
	}

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
			 virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	}
	wait_for_completion(&umr_context.done);
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "reg umr failed\n");
		err = -EFAULT;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = len;
	mr->mmr.pd = to_mpd(pd)->pdn;
	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
free_pas:
	kfree(mr_pas);
free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}
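/*
 * On-demand paging support: rewrite a window of an existing mkey's MTTs
 * through UMR, either repopulating them from the umem or clearing them.
 * This can run from an invalidation context (see the GFP_ATOMIC comment
 * below), hence the static emergency buffer fallback.
 */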
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr wr, *bad;
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		memset(&wr, 0, sizeof(wr));
		wr.wr_id = (u64)(unsigned long)&umr_context;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.mr->lkey;

		wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.sg_list = &sg;
		wr.num_sge = 1;
		wr.opcode = MLX5_IB_WR_UMR;
		umrwr->npages = sg.length / sizeof(u64);
		umrwr->page_shift = PAGE_SHIFT;
		umrwr->mkey = mr->mmr.key;
		umrwr->target.offset = start_page_index;

		mlx5_ib_init_umr_context(&umr_context);
		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif
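/*
 * Firmware-command registration path, used when the UMR/cache path cannot
 * serve the request: build a CREATE_MKEY mailbox carrying the whole page
 * list inline and create the mkey synchronously.
 */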
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
				     u64 length, struct ib_umem *umem,
				     int npages, int page_shift,
				     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;
	bool pg_cap = !!(dev->mdev->caps.gen.flags &
			 MLX5_DEV_CAP_FLAG_ON_DMND_PG);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
	 * in the page list submitted with the command. */
	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length,
							1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

	return mr;

err_2:
	kvfree(in);
err_1:
	kfree(mr);
	return ERR_PTR(err);
}
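/*
 * ib_reg_user_mr entry point: pin the user range, pick the best page size
 * with mlx5_ib_cont_pages(), then register either through the UMR cache
 * (reg_umr) or, if the order is too large or the cache is empty, through
 * the firmware command path (reg_create).
 */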
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
			   0);
	if (IS_ERR(umem)) {
		mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
	if (!npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		err = -EINVAL;
		goto error;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    npages, ncont, order, page_shift);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	}

	if (!mr)
		mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
				access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

	mr->umem = umem;
	mr->npages = npages;
	spin_lock(&dev->mr_lock);
	dev->mdev->priv.reg_pages += npages;
	spin_unlock(&dev->mr_lock);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;
	int err;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	}
	wait_for_completion(&umr_context.done);
	up(&umrc->sem);
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}
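/*
 * ib_dereg_mr entry point.  MRs that came from the cache (mr->umred) are
 * invalidated through a UMR work request and returned to their cache
 * entry; others destroy the mkey with a firmware command and are freed.
 */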
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct ib_umem *umem = mr->umem;
	int npages = mr->npages;
	int umred = mr->umred;
	int err;

	if (!umred) {
		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmr.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (umem) {
		ib_umem_release(umem);
		spin_lock(&dev->mr_lock);
		dev->mdev->priv.reg_pages -= npages;
		spin_unlock(&dev->mr_lock);
	}

	if (!umred)
		kfree(mr);

	return 0;
}
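/*
 * ib_create_mr entry point, used mainly for signature-enabled MRs: when
 * IB_MR_SIGNATURE_EN is requested the mkey gets a BSF and a pair of PSVs
 * (memory and wire domains) for signature offload, and the access mode is
 * switched from MTT to KLM.
 */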
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
				struct ib_mr_init_attr *mr_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = MLX5_MKEY_STATUS_FREE;
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	access_mode = MLX5_ACCESS_MODE_MTT;

	if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
		u32 psv_index[2];

		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
					       MLX5_MKEY_BSF_EN);
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	}
	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
	}

	err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
	if (err) {
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
			     mr->mmr.key, err);
		return err;
	}

	kfree(mr);
	return 0;
}
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = MLX5_MKEY_STATUS_FREE;
	in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	/*
	 * TBD not needed - issue 197292 */
	in->seg.log2_page_size = PAGE_SHIFT;

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
				    NULL, NULL);
	kfree(in);
	if (err)
		goto err_free;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof(u64);

	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);	/* mapped page list must be 64-byte aligned */

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
	int size = page_list->max_page_list_len * sizeof(u64);

	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}
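/*
 * Report signature status for a signature-enabled MR: if a signature error
 * has been latched, copy it into mr_status->sig_err and mark it consumed so
 * the next check starts clean.
 */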
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}