/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

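/*
 * MR (memory region) bookkeeping.  MTT (memory translation table)
 * entries map the pages backing a region; MPT (memory protection
 * table) entries hold a region's key, protection domain and access
 * rights.  MTT ranges are handed out in power-of-two blocks by the
 * buddy allocator below: bits[o] is a bitmap of the free blocks of
 * order o, and num_free[o] counts them.
 */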
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

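/*
 * Freeing walks back up the orders: while the buddy block (seg ^ 1)
 * is also free, the two halves merge and the scan moves one order up.
 * For example, freeing segment 5 at order 0 while segment 4 is free
 * clears bit 4 and marks segment 2 free at order 1 instead.
 */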
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

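/*
 * Order o can hold 2^(max_order - o) blocks, so each level's bitmap
 * is sized accordingly, falling back to vzalloc when a large kcalloc
 * fails.  Initially the single top-order block spanning the whole
 * range is the only free block.
 */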
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		if (is_vmalloc_addr(buddy->bits[i]))
			vfree(buddy->bits[i]);
		else
			kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

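/*
 * mtt->order is the log2 of the number of MTT entries backing the
 * region, rounded up to a power of two: npages = 5 yields order 3
 * (eight entries).  npages = 0 denotes a physically contiguous
 * region that needs no MTT at all, recorded as order = -1.
 */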
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

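/*
 * Converts an MTT entry index into a byte offset within the MTT
 * table; for example, with the common 8-byte MTT entry size, offset 8
 * maps to byte address 64.
 */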
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

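/*
 * A memory key is the 24-bit MPT index rotated left by 8 bits, so the
 * low byte of the key serves as a software-controlled tag.  The two
 * helpers below are inverse rotations: hw_index_to_key(2) == 0x200
 * and key_to_hw_index(0x200) == 2.
 */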
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

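/*
 * SW2HW_MPT hands a software-built MPT entry to the firmware;
 * HW2SW_MPT takes it back.  Passing a NULL mailbox to HW2SW_MPT sets
 * the opcode modifier, which asks the firmware not to return the
 * entry's contents.
 */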
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd      = pd;
	mr->access  = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key     = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

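/*
 * Enabling an MR: reserve ICM backing for its MPT entry, build the
 * entry in a command mailbox, then issue SW2HW_MPT to pass ownership
 * to the hardware.  A negative mtt.order marks a physically
 * contiguous region; order >= 0 with page_shift == 0 marks a
 * fast-register MR that is left in the free state for later binding.
 */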
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

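/*
 * A chunk written by mlx4_write_mtt_chunk must not cross an ICM page
 * boundary.  Assuming 4 KB pages and 8-byte entries, a page holds 512
 * entries, so a write whose first entry lands at in-page index 500 is
 * capped at 12 entries; later iterations start page aligned.
 */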
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

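/*
 * FMR (fast memory region) support.  An FMR can be remapped to a new
 * set of pages without firmware commands: software flips the MPT
 * status byte to reclaim ownership, rewrites the MTT entries and MPT
 * fields directly through their ICM mappings, then flips the status
 * back to hardware ownership.
 */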
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

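/*
 * Sketch of the FMR lifecycle for callers (error handling omitted;
 * pd, access, pages, npages and iova are caller-supplied values):
 *
 *	struct mlx4_fmr fmr;
 *	u32 lkey, rkey;
 *
 *	mlx4_fmr_alloc(dev, pd, access, max_pages, max_maps,
 *		       page_shift, &fmr);
 *	mlx4_fmr_enable(dev, &fmr);
 *	mlx4_map_phys_fmr(dev, &fmr, pages, npages, iova, &lkey, &rkey);
 *	... post work requests using lkey/rkey ...
 *	mlx4_fmr_unmap(dev, &fmr, &lkey, &rkey);
 *	mlx4_fmr_free(dev, &fmr);
 */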
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
		       err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);