/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};
static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
	},
};
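/* Firmware advertises an "initializing" bit in the initialization segment
 * until it is ready to accept commands; wait_fw_init() below polls that bit,
 * sleeping FW_INIT_WAIT_MS between reads, and gives up once the caller's
 * timeout expires.
 */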
#define FW_INIT_TIMEOUT_MILI	2000
#define FW_INIT_WAIT_MS		2

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return 0;
}
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}
static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}
static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
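/* One completion vector is requested per port and online CPU, on top of the
 * MLX5_EQ_VEC_COMP_BASE vectors reserved for the control event queues, and
 * the total is capped by the number of EQs the device exposes (log_max_eq).
 */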
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int i;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);

	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
	if (!priv->msix_arr || !priv->irq_info)
		goto err_free_msix;

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_msix:
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
	return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}
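/* The HOST_ENDIANNESS access register below is how set_hca_ctrl() reports
 * the host byte order to firmware; it is only written on the physical
 * function.
 */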
struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		pr_warn("invalid pkey table size %d\n", size);
		return 0;
	}
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
		       enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		return err;

	err = mlx5_cmd_status_to_err_v2(out);

	return err;
}
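/* handle_hca_cap() queries both the maximum and the currently enabled
 * general capabilities, adjusts the current set according to the selected
 * profile (pkey table size, log_max_qp, cmdif checksum, UAR page size) and
 * writes the result back with SET_HCA_CAP before the HCA is initialized.
 */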
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int err = -ENOMEM;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
	if (err)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
	int err;

	memset(in, 0, sizeof(in));
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	memset(out, 0, sizeof(out));

	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err_v2(out);
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
	int err;

	memset(in, 0, sizeof(in));
	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	memset(out, 0, sizeof(out));

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err_v2(out);
}
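/* The 64-bit internal timer is exposed as two 32-bit registers. The high
 * word is read on both sides of the low word so that a carry between the
 * two reads is detected and the low word re-sampled.
 */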
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) /* wrap around */
		timer_l = ioread32be(&dev->iseg->internal_timer_l);

	return (cycle_t)timer_l | (cycle_t)timer_h1 << 32;
}
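/* Completion interrupts are spread over the CPUs of the device's NUMA node:
 * each completion vector gets an affinity hint pointing at one node-local
 * CPU, chosen with cpumask_local_spread().
 */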
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
	int numa_node           = priv->numa_node;
	int err;

	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
			priv->irq_info[i].mask);

	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
	if (err) {
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
			       irq);
		goto err_clear_mask;
	}

	return 0;

err_clear_mask:
	free_cpumask_var(priv->irq_info[i].mask);
	return err;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(priv->irq_info[i].mask);
}
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
		err = mlx5_irq_set_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		mlx5_irq_clear_affinity_hint(mdev, i);

	return err;
}
static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
		mlx5_irq_clear_affinity_hint(mdev, i);
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}
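/* ISSI (Interface Step Sequence ID) is the revision of the command
 * interface negotiated between driver and firmware: the driver queries the
 * bitmask of supported ISSIs and switches the device to ISSI 1 when it is
 * offered, otherwise it stays on ISSI 0.
 */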
#ifdef CONFIG_MLX5_CORE_EN
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
	int err;
	u32 sup_issi;

	memset(query_in, 0, sizeof(query_in));
	memset(query_out, 0, sizeof(query_out));

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
					 query_out, sizeof(query_out));
	if (err) {
		if (((struct mlx5_outbox_hdr *)query_out)->status ==
		    MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		pr_err("failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		memset(set_in, 0, sizeof(set_in));
		memset(set_out, 0, sizeof(set_out));

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
						 set_out, sizeof(set_out));
		if (err) {
			pr_err("failed to set ISSI=1\n");
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -ENOTSUPP;
}
#endif
static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}
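/* Protocol drivers (e.g. mlx5_ib) attach to the core through struct
 * mlx5_interface. For every (interface, core device) pair an
 * mlx5_device_context is created via intf->add() and kept on the device's
 * ctx_list so that events can be dispatched and the pair removed later.
 */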
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}
static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}
static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);
void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	mutex_init(&priv->alloc_mutex);

	priv->numa_node = dev_to_node(&dev->pdev->dev);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
	debugfs_remove(priv->dbg_root);
}
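/* mlx5_load_one() takes the device from "PCI ready" to fully operational:
 * command interface, firmware pages, HCA capabilities, MSI-X and event
 * queues, UARs, flow steering, eswitch/SR-IOV and finally registration with
 * the protocol interfaces. mlx5_unload_one() tears the same stack down in
 * reverse order.
 */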
#define MLX5_IB_MOD "mlx5_ib"
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
			 __func__);
		goto out;
	}

	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	/* on load removing any previous indication of internal error, device is
	 * up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
			FW_INIT_TIMEOUT_MILI);
		goto out_err;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_pagealloc_cleanup;
	}
#ifdef CONFIG_MLX5_CORE_EN
	err = mlx5_core_set_issi(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to set issi\n");
		goto err_disable_hca;
	}
#endif

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}
	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto err_disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		dev_err(&pdev->dev, "Failed to map blue flame area\n");

	err = mlx5_irq_set_affinity_hints(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
		goto err_unmap_bf_area;
	}
	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	err = mlx5_init_fs(dev);
	if (err)
		dev_err(&pdev->dev, "Failed to init flow steering\n");

#ifdef CONFIG_MLX5_CORE_EN
	err = mlx5_eswitch_init(dev);
	if (err)
		dev_err(&pdev->dev, "eswitch init failed %d\n", err);
#endif

	err = mlx5_sriov_init(dev);
	if (err)
		dev_err(&pdev->dev, "sriov init failed %d\n", err);

	err = mlx5_register_device(dev);
	if (err)
		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);

	err = request_module_nowait(MLX5_IB_MOD);
	if (err)
		pr_info("failed request module on %s\n", MLX5_IB_MOD);

	dev->interface_state = MLX5_INTERFACE_STATE_UP;
out:
	mutex_unlock(&dev->intf_state_mutex);

	return 0;
	if (mlx5_sriov_cleanup(dev))
		dev_err(&dev->pdev->dev, "sriov cleanup failed\n");

#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif

	mlx5_cleanup_fs(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_irq_clear_affinity_hints(dev);

err_unmap_bf_area:
	unmap_bf_area(dev);

	free_comp_eqs(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

err_disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev, 0);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	int err = 0;

	err = mlx5_sriov_cleanup(dev);
	if (err) {
		dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n",
			 __func__);
		return err;
	}

	mutex_lock(&dev->intf_state_mutex);
	if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
			 __func__);
		goto out;
	}

	mlx5_unregister_device(dev);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif

	mlx5_cleanup_fs(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_irq_clear_affinity_hints(dev);
	unmap_bf_area(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

out:
	dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}
	priv = &dev->priv;
	priv->pci_dev_data = id->driver_data;

	pci_set_drvdata(pdev, dev);

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("selected profile out of range, selecting default (%d)\n",
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profile[prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
		goto close_pci;
	}

	err = mlx5_load_one(dev, priv);
	if (err) {
		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
		goto clean_health;
	}

	return 0;

clean_health:
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	pci_set_drvdata(pdev, NULL);
	kfree(dev);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
	pci_set_drvdata(pdev, NULL);
	kfree(dev);
}
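/* PCI error handling (AER): on a detected error the driver stack is
 * unloaded and the device put in an error state; slot_reset re-enables the
 * device, and resume waits for it to show vital signs before reloading the
 * full stack.
 */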
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev);
	mlx5_unload_one(dev, priv);
	mlx5_pci_disable_device(dev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}
/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non zero value which is different than 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			dev_warn(&pdev->dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	pci_save_state(pdev);
	wait_vital(pdev);

	err = mlx5_load_one(dev, priv);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
			__func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 0x1013) },			/* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
static struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.probe		= init_one,
	.remove		= remove_one,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure = mlx5_core_sriov_configure,
};
static int __init init(void)
{
	int err;

	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_init();
#endif

	return 0;

err_debug:
	mlx5_unregister_debugfs();
	return err;
}
static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);