/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;
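
/* RX statistics are kept per CPU and are folded together only when read
 * back through debugfs, so the RX hot path never takes a shared lock.
 */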
struct mlxsw_core_pcpu_stats {
	u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync syncp;
	u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32 trap_rx_invalid;
	u32 port_rx_invalid;
};
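
/* One mlxsw_core instance is allocated per bus device. The bus' private
 * data pointer and the driver's private area (the trailing driver_priv[]
 * storage) both live here.
 */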
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		struct sk_buff *resp_skb;
		u64 tid;
		wait_queue_head_t wait;
		bool trans_active;
		struct mutex lock; /* One EMAD transaction at a time. */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_hwmon *hwmon;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};
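
/* An EMAD is an Ethernet frame sent to the device's CPU port: an Ethernet
 * header with a fixed destination MAC, followed by an operation TLV, a
 * register TLV carrying the register payload, and a terminating end TLV.
 * The item definitions below describe the fields of these headers.
 */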
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   struct mlxsw_core *mlxsw_core)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_core *mlxsw_core)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

	mlxsw_emad_construct_eth_hdr(skb);
}
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
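
/* EMAD transactions are synchronous: the requester transmits the frame and
 * then sleeps on emad.wait until the RX listener below matches a response
 * carrying the current transaction ID, or the timeout expires.
 */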
#define MLXSW_EMAD_TIMEOUT_MS 200

static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	int err;
	int ret;

	mlxsw_core->emad.trans_active = true;

	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
			mlxsw_core->emad.tid);
		dev_kfree_skb(skb);
		goto trans_inactive_out;
	}

	ret = wait_event_timeout(mlxsw_core->emad.wait,
				 !(mlxsw_core->emad.trans_active),
				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
	if (!ret) {
		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
			 mlxsw_core->emad.tid);
		err = -EIO;
		goto trans_inactive_out;
	}

	return 0;

trans_inactive_out:
	mlxsw_core->emad.trans_active = false;
	return err;
}
static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
				     char *op_tlv)
{
	enum mlxsw_emad_op_tlv_status status;
	u64 tid;

	status = mlxsw_emad_op_tlv_status_get(op_tlv);
	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

	switch (status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
			 tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
			tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EIO;
	}
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
					 struct sk_buff *skb)
{
	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}
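
/* A BUSY or receipt-acknowledgment status is reported as -EAGAIN, which
 * drives the bounded retransmit loop in mlxsw_emad_transmit() below.
 */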
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct sk_buff *skb,
			       const struct mlxsw_tx_info *tx_info)
{
	struct sk_buff *trans_skb;
	int n_retry;
	int err;

	n_retry = 0;
retry:
	/* We copy the EMAD to a new skb, since we might need
	 * to retransmit it in case of failure.
	 */
	trans_skb = skb_copy(skb, GFP_KERNEL);
	if (!trans_skb) {
		err = -ENOMEM;
		goto out;
	}

	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
	if (!err) {
		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
		if (err)
			dev_kfree_skb(resp_skb);
		if (!err || err != -EAGAIN)
			goto out;
	}
	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
		goto retry;

out:
	dev_kfree_skb(skb);
	mlxsw_core->emad.tid++;
	return err;
}
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	if (mlxsw_emad_is_resp(skb) &&
	    mlxsw_core->emad.trans_active &&
	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
		mlxsw_core->emad.resp_skb = skb;
		mlxsw_core->emad.trans_active = false;
		wake_up(&mlxsw_core->emad.wait);
	} else {
		dev_kfree_skb(skb);
	}
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};
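
/* Route EMAD responses to the CPU: bind the EMAD trap group via the HTGT
 * register and set the ETHEMAD trap action to TRAP_TO_CPU via HPKT.
 */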
static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&mlxsw_core->emad.tid, 4);
	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

	init_waitqueue_head(&mlxsw_core->emad.wait);
	mlxsw_core->emad.trans_active = false;
	mutex_init(&mlxsw_core->emad.lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}
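
/* Size the EMAD skb for the worst case: Ethernet header, operation TLV,
 * register TLV (register payload plus its TLV header word), end TLV and
 * the bus-specific TX header, and fail if that exceeds the maximum EMAD
 * frame length.
 */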
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
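
/* Fold the per-CPU RX counters into per-trap and per-port totals for the
 * "rx_stats" debugfs file; the u64_stats sync protects the 64-bit counters
 * on 32-bit machines.
 */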
static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES  RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);
	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}
static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
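
/* Device-specific drivers register themselves on the list above. When a bus
 * device of a given kind probes, the core looks the driver up by name and,
 * if it is not loaded yet, requests it via the MLXSW_MODULE_ALIAS_PREFIX
 * module alias.
 */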
static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_core) {
		err = -ENOMEM;
		goto err_core_alloc;
	}

	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	if (mlxsw_driver->profile->used_max_lag &&
	    mlxsw_driver->profile->used_max_port_per_lag) {
		alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
			     mlxsw_driver->profile->max_port_per_lag;
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
				 mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
err_hwmon_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_core);
err_core_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	kfree(mlxsw_core->lag.mapping);
	free_percpu(mlxsw_core->pcpu_stats);
	kfree(mlxsw_core);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
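
/* The driver's private area is allocated together with struct mlxsw_core
 * (driver_priv[] is the trailing member), so the core can be recovered from
 * a driver_priv pointer with container_of().
 */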
static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
	return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

bool mlxsw_core_skb_transmit_busy(void *driver_priv,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
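
/* Register access has two transports: EMAD frames sent through the CPU port
 * once the device is fully initialized, and the slower command interface
 * (ACCESS_REG mailbox) used before the EMAD traps are in place.
 */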
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type)
{
	int err;
	char *op_tlv;
	struct sk_buff *skb;
	struct mlxsw_tx_info tx_info = {
		.local_port = MLXSW_PORT_CPU_PORT,
		.is_emad = true,
	};

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
	mlxsw_core->driver->txhdr_construct(skb, &tx_info);

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
		mlxsw_core->emad.tid);
	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
	if (!err) {
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
		       reg->len);

		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
			mlxsw_core->emad.tid - 1);
		mlxsw_core_buf_dump_dbg(mlxsw_core,
					mlxsw_core->emad.resp_skb->data,
					mlxsw_core->emad.resp_skb->len);

		dev_kfree_skb(mlxsw_core->emad.resp_skb);
	}

	return err;
}
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
			goto retry;
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_core->emad.tid++;
	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	return err;
}
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	u64 cur_tid;
	int err;

	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
		return -EINTR;
	}

	cur_tid = mlxsw_core->emad.tid;
	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						payload, type);
	else
		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
						 payload, type);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));

	mutex_unlock(&mlxsw_core->emad.lock);
	return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
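
/* Typical caller pattern (as used by mlxsw_emad_traps_set() above): pack a
 * register payload on the stack and write it out, e.g.
 *
 *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
 *
 *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
 *			    MLXSW_TRAP_ID_ETHEMAD);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 */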
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
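
/* The LAG mapping is a flat max_lag x max_port_per_lag byte array, allocated
 * in mlxsw_core_bus_device_register() when the driver profile uses LAG. It
 * translates (lag_id, port_index) into the member's local port so that RX
 * traffic can be attributed to a real port.
 */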
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
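
/* Thin wrapper around the bus' command interface: validate mailbox sizes,
 * log the opcode and mailboxes at debug level and report firmware status
 * codes or timeouts to the caller.
 */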
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
static int __init mlxsw_core_module_init(void)
{
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root)
		return -ENOMEM;
	return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");